source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
c_fft.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
(LICENSE file) along with this program; if not, write to
the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
FILE: c_fft.c
VERSION: 1.0
DATE: May 2004
AUTHOR: F. de Sande
COMMENTS TO: sande@csi.ull.es
DESCRIPTION: This program computes the Fast Fourier Transform
on an input signal
COMMENTS: The algorithm uses a divide and conquer strategy
and the transform is computed as a combination of the
transforms of the even and odd terms of the original signal.
The code requires nested Parallelism.
Function write_array() is provided only for debugging purposes.
(use a small size signal if you want to write it).
REFERENCES: James W. Cooley and John W. Tukey,
An Algorithm for the Machine Calculation of Complex Fourier Series,
Mathematics of Computation, 1965, vol. 19, no. 90, pg 297-301
http://en.wikipedia.org/wiki/Cooley-Tukey_FFT_algorithm
BASIC PRAGMAS: parallel for
USAGE: ./c_fft.par 8192
INPUT: The size of the input signal
OUTPUT: The code tests the correctness of the result for the input
FILE FORMATS: -
RESTRICTIONS: The size of the input signal MUST be a power of 2
REVISION HISTORY:
**************************************************************************/
//#include "OmpSCR.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>   /* atoi, exit, calloc, free */
#include <omp.h>
#define KILO (1024)
#define DEFAULT_SIZE_IN_KB (64)
#define NUM_ARGS 1
#define NUM_TIMERS 1
#define ARG 1
typedef double doubleType;
typedef struct {
doubleType re;
doubleType im;
} Complex;
/* -----------------------------------------------------------------------
PROTOTYPES
* ----------------------------------------------------------------------- */
void initialize(unsigned Size, Complex *a);
void write_array(unsigned Size, Complex *a);
int test_array(unsigned Size, Complex *a);
void FFT(Complex *A, Complex *a, Complex *W, unsigned N, unsigned stride, Complex *D);
void Roots(unsigned Size, Complex *W);
unsigned get_params(int argc, char *argv[]);
/* -----------------------------------------------------------------------
IMPLEMENTATION
* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
Routine: initialize
Description: Initialise a vector of complex numbers
Comment: all numbers have real part 1.0 and imaginary part 0.0
* ----------------------------------------------------------------------- */
void initialize(unsigned Size, Complex *a) {
  unsigned idx;

  /* Constant test signal: every sample is 1 + 0i. Its DFT is the impulse
     [(Size,0), (0,0), ..., (0,0)], which is exactly what test_array() checks. */
  for (idx = 0; idx < Size; idx++) {
    a[idx].re = 1.0;
    a[idx].im = 0.0;
  }
}
/* -----------------------------------------------------------------------
Routine: write_array
Description: Display a vector of complex numbers
* ----------------------------------------------------------------------- */
/* Debug helper: dump the complex vector, one element per line. */
void write_array(unsigned Size, Complex *a) {
  unsigned idx = 0;

  while (idx < Size) {
    printf("a[%2u] = [%.8lf,%.8lf]\n", idx, a[idx].re, a[idx].im);
    idx++;
  }
}
/* -----------------------------------------------------------------------
Routine: test_array
Description: Test is true if the complex vector is of the form
[(Size,0),(0,0),...,(0,0)]
* ----------------------------------------------------------------------- */
/* Returns 1 iff the vector is the impulse [(Size,0),(0,0),...,(0,0)],
   i.e. the exact transform of the all-ones test signal; 0 otherwise. */
int test_array(unsigned Size, Complex *a) {
  unsigned idx;

  /* First element must carry the whole signal energy: (Size, 0). */
  if (a[0].re != Size || a[0].im != 0)
    return 0;
  /* Every remaining element must be exactly zero. */
  for (idx = 1; idx < Size; idx++) {
    if (a[idx].re != 0.0 || a[idx].im != 0.0)
      return 0;
  }
  return 1;
}
/* -----------------------------------------------------------------------
   Procedure: Roots
   Description: Computes the roots of unity used as FFT twiddle factors
   Parameters:
   unsigned Size, number of roots to compute
   Complex *W, vector containing the roots
   * ----------------------------------------------------------------------- */
/* Fills W[0..Size-1] with powers of the principal 2*Size-th root of unity:
   W[k] = e^{i*k*PI/Size}. For an N-point FFT the caller passes Size = N/2,
   so these are the twiddle factors W_N^k covering the upper half circle. */
void Roots(unsigned Size, Complex *W) {
  unsigned k;
  double phi;
  Complex Omega;

  phi = 4 * atan(1.0) / (double)Size;   /* 4*atan(1) == PI, so phi = PI/Size */
  Omega.re = cos(phi);
  Omega.im = sin(phi);

  W[0].re = 1.0;
  W[0].im = 0.0;
  /* Each root is the previous one rotated by Omega (complex multiplication).
     Note the incremental recurrence accumulates a small rounding drift. */
  for (k = 1; k < Size; k++) {
    W[k].re = W[k-1].re * Omega.re - W[k-1].im * Omega.im;
    W[k].im = W[k-1].re * Omega.im + W[k-1].im * Omega.re;
  }
}
/* -----------------------------------------------------------------------
Procedure: FFT
Description: Recursive (divide and conquer) Fast Fourier Transform
Parameters:
Complex *A, transformed output signal
Complex *a, input signal
Complex *W, vector containing the roots
unsigned N, number of elements in a
unsigned stride, between consecutive elements in a to be considered
Complex *D, auxiliar vector to do combination
* ----------------------------------------------------------------------- */
/* Recursive Cooley-Tukey FFT.
   A : output buffer for the N-point transform (also used as scratch by the
       recursive calls -- see the buffer swap below)
   a : input signal, read with the given stride (never copied)
   W : twiddle factors from Roots(N/2, W) of the TOP-LEVEL call; each
       recursion level reads them with a doubled stride
   D : scratch buffer of N elements; holds the two half-size transforms
   Precondition: N is a power of 2 (see file header). */
void FFT(Complex *A, Complex *a, Complex *W, unsigned N,
         unsigned stride, Complex *D) {
  Complex *B, *C;
  Complex Aux, *pW;
  unsigned n;
  int i;

  /* Base case: the FFT of one sample is the sample itself. */
  if (N == 1) {
    A[0].re = a[0].re;
    A[0].im = a[0].im;
  }
  else {
    /* Division stage without copying input data */
    n = (N >> 1); /* N = N div 2 */
    /* Subproblems resolution stage: i == 0 transforms the even-indexed
       subsequence, i == 1 the odd-indexed one (selected purely by stride).
       Buffer swap: results land in D while A serves as the subproblems'
       scratch, so no data is ever copied.
       NOTE(review): effective two-way parallelism here requires nested
       OpenMP parallelism to be enabled; otherwise the calls run serially. */
#pragma omp parallel for
    for (i = 0; i <= 1; i++) {
      FFT(D + i * n, a + i * stride, W, n, stride << 1, A + i * n);
    }
    /* Combination stage: butterfly of the even half B and odd half C back
       into A using the twiddle factors. */
    B = D;
    C = D + n;
    /* NOTE(review): 'i' (int) is compared against 'n - 1' (unsigned); safe
       here because n >= 1 for every recursive call. */
#pragma omp parallel for default(none) private(i, Aux, pW) shared(stride, n, A, B, C, W)
    for (i = 0; i <= n - 1; i++) {
      pW = W + i * stride;                           /* twiddle factor W^(i*stride) */
      Aux.re = pW->re * C[i].re - pW->im * C[i].im;  /* Aux = W * C[i] */
      Aux.im = pW->re * C[i].im + pW->im * C[i].re;
      A[i].re = B[i].re + Aux.re;                    /* A[i]   = B[i] + W*C[i] */
      A[i].im = B[i].im + Aux.im;
      A[i+n].re = B[i].re - Aux.re;                  /* A[i+n] = B[i] - W*C[i] */
      A[i+n].im = B[i].im - Aux.im;
    }
  }
}
/* ----------------------------------------------------------------------- */
/* Parses the optional command-line argument: the signal size in KB.
   Returns the size in KB (DEFAULT_SIZE_IN_KB when no argument is given);
   prints usage and exits with -1 on a malformed argument count.
   Requires <stdlib.h> for atoi()/exit(). */
unsigned get_params(int argc, char *argv[]) {
  char usage_str[] = "<size_in_Kb>";
  unsigned sizeInKb;

  if (argc == 2)
    sizeInKb = (unsigned)atoi(argv[1]);
  else if (argc == 1)
    sizeInKb = DEFAULT_SIZE_IN_KB;
  else {
    printf("\nUse: %s %s\n", argv[0], usage_str);
    exit(-1);
  }
  printf("\nUse: %s %s\n", argv[0], usage_str);
  /* BUG FIX: sizeInKb is unsigned, so the conversion specifier must be %u;
     printing it with %d is undefined behavior per the printf contract. */
  printf("Running with Size: %u K\n", sizeInKb);
  return sizeInKb;
}
/* ----------------------------------------------------------------------- */
/* Driver: builds the all-ones test signal, transforms it, and verifies the
   result against the analytic answer [(N,0),(0,0),...,(0,0)]. */
int main(int argc, char *argv[]) {
  unsigned N;
  Complex *a, *A, *W, *D;
  int NUMTHREADS;
  char *PARAM_NAMES[NUM_ARGS] = {"Size of the input signal (in Kb)"};
  char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time" };
  char *DEFAULT_VALUES[NUM_ARGS] = {"64"};

  NUMTHREADS = 1; //omp_get_num_threads();
  //OSCR_init (NUMTHREADS, "Divide and Conquer Fast Fourier Transform.", "Use 'fft' <size (in K)>", NUM_ARGS,
  //  PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
  //  argc, argv);
  /* NOTE(review): with the OmpSCR harness commented out, the signal size is
     hard-coded to KILO * ARG (1024) elements; argc/argv are ignored. */
  N = KILO * ARG; // OSCR_getarg_int(1);
  /* N = KILO * get_params(argc, argv); */
  /* Memory allocation: a = input, A = output, D = combination scratch,
     W = the N/2 twiddle factors. */
  a = (Complex*)calloc(N, sizeof(Complex));
  A = (Complex*)calloc(N, sizeof(Complex));
  D = (Complex*)calloc(N, sizeof(Complex));
  W = (Complex*)calloc(N>>1, sizeof(Complex));
  if((a==NULL) || (A==NULL) || (D==NULL) || (W==NULL)) {
    printf("Not enough memory initializing arrays\n");
    exit(1);
  }
  initialize(N, a); /* Generate test input signal */
  /* write_array(N, a); */
  Roots(N >> 1, W); /* Initialise the vector of imaginary roots */
  //OSCR_timer_start(0);
  FFT(A, a, W, N, 1, D);
  //OSCR_timer_stop(0);
  /* write_array(N, A); */
  /* Display results and time */
  printf("Test array: ");
  if (test_array(N, A))
    printf("Ok\n");
  else
    printf("Fails\n");
  //OSCR_report(1, TIMERS_NAMES);
  free(W);
  free(D);
  free(A);
  free(a);
  return 0;
}
/*
* vim:ts=2:sw=2:
*/
|
work.c | /********************************************************************
* BenchIT - Performance Measurement for Scientific Applications
* Contact: developer@benchit.org
*
* $Id: work.c 1 2009-09-11 12:26:19Z william $
* $URL: svn+ssh://william@rupert.zih.tu-dresden.de/svn-base/benchit-root/BenchITv6/kernel/numerical/gemv/C/OpenMP/0/double/work.c $
* For license details see COPYING in the package base directory
*******************************************************************/
/* Kernel: C DGEMV kernel
*******************************************************************/
#include "work.h"
/* DGEMV, "ij" loop order: y = beta*y + alpha * A^T * x, with A stored
 * row-major as sizeVector rows of sizeAusgabe columns (A[i*sizeAusgabe+j]).
 * x has sizeVector elements, y has sizeAusgabe elements. */
void ij_(int sizeVector,int sizeAusgabe,double alpha,double beta, double *x, double *A, double *y)
{
  int i,j;
  double temp = 0.0;

  /* y = beta * y */
  #pragma omp parallel for private(j)
  for (j=0;j<sizeAusgabe;j++)
  {
    y[j]=beta*y[j];
  }
  //
  // now : x=x, A=A, y=beta*y
  //
  /* BUG FIX: the parallel loop runs over i, so every thread updates every
   * y[j]; the unsynchronized y[j] += ... was a data race producing wrong
   * results with >1 thread. The atomic update keeps the characteristic
   * "ij" traversal order of this kernel variant while making the shared
   * accumulation race-free. */
  #pragma omp parallel for private(temp,j,i)
  for (i=0;i<sizeVector;i++)
  {
    temp=alpha*x[i];
    for (j=0;j<sizeAusgabe;j++)
    {
      #pragma omp atomic
      y[j] += A[i*sizeAusgabe+j]*temp;
    }
  }
}
/* DGEMV, "ji" loop order: y = beta*y + alpha * A^T * x, with A stored
 * row-major as sizeVector rows of sizeAusgabe columns. Each thread of the
 * second loop owns exactly one y[col], so no synchronization is needed. */
void ji_(int sizeVector,int sizeAusgabe,double alpha,double beta, double *x, double *A, double *y)
{
  int row, col;

  /* First pass: scale the output vector, y = beta * y. */
  #pragma omp parallel for private(col)
  for (col = 0; col < sizeAusgabe; col++)
  {
    y[col] = beta * y[col];
  }

  /* Second pass: accumulate one output element per (parallel) iteration,
   * walking down column 'col' of A. */
  #pragma omp parallel for private(row)
  for (col = 0; col < sizeAusgabe; col++)
  {
    double acc = 0.0;
    for (row = 0; row < sizeVector; row++)
      acc = acc + A[row*sizeAusgabe + col] * x[row];
    y[col] = y[col] + acc * alpha;
  }
}
|
maxent.c | /*
* Copyright 2010 Daniël de Kok
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <tinyest/bitvector.h>
#include <tinyest/dataset.h>
#include <tinyest/lbfgs.h>
#include <tinyest/maxent.h>
#include <tinyest/model.h>
typedef struct {
double l2_sigma_sq;
dataset_t *dataset;
model_t *model;
} maxent_lbfgs_data_t;
/* Computes the unnormalized event scores of one context.
 *
 * For every event j of ctx, sums[j] = exp(sum_k params[f_k] * v_k) over the
 * event's feature/value pairs; when f_restrict is non-null, only features
 * whose bit is set participate. *z receives the sum of all sums[j] (the
 * context's normalization constant). sums must hold ctx->n_events entries. */
void maxent_context_sums(dataset_context_t *ctx, lbfgsfloatval_t const *params,
    long double *sums, long double *z, bitvector_t *f_restrict)
{
  memset(sums, 0, ctx->n_events * sizeof(long double));
  *z = 0.0;

  for (int ev = 0; ev < ctx->n_events; ++ev) {
    dataset_event_t *event = &ctx->events[ev];
    long double score = 0.0;
    for (int f = 0; f < event->n_fvals; ++f) {
      feature_value_t *fv = &event->fvals[f];
      if (f_restrict == 0 || bitvector_get(f_restrict, fv->feature))
        score += params[fv->feature] * fv->value;
    }
    sums[ev] = expl(score);
    *z += sums[ev];
  }
}
/* L-BFGS evaluation callback: returns the negated (optionally L2-penalized)
 * log-likelihood of the parameter vector x and writes the negated gradient
 * into g (negated because lbfgs() minimizes while we maximize likelihood).
 * instance carries a maxent_lbfgs_data_t: the dataset, the model (whose
 * f_restrict bitvector, when non-null, limits the active features), and the
 * prior variance l2_sigma_sq (0.0 disables the Gaussian prior).
 * The n and step arguments are unused here. */
lbfgsfloatval_t maxent_lbfgs_evaluate(void *instance, lbfgsfloatval_t const *x,
    lbfgsfloatval_t *g, int const n, lbfgsfloatval_t const step)
{
  maxent_lbfgs_data_t *d = (maxent_lbfgs_data_t *) instance;
  dataset_t *ds = d->dataset;
  double *fvals = ds->feature_values;

  /* Gradient starts at -E_emp[f_i]; the model expectation is added below.
   * NOTE(review): entries for restricted-out features are never written
   * here -- presumably lbfgs()/the restriction machinery ignores them;
   * confirm against the callers. */
  for (int i = 0; i < d->dataset->n_features; ++i)
    if (d->model->f_restrict == 0 ||
        bitvector_get(d->model->f_restrict, i))
      g[i] = -fvals[i];

  lbfgsfloatval_t ll = 0.0;
  dataset_context_t *ctxs = ds->contexts;
  for (int i = 0; i < ds->n_contexts; ++i)
  {
    lbfgsfloatval_t ctxLl = 0.0;
    // Skip contexts that have a probability of zero. If we allow such
    // contexts, we can not calculate empirical p(y|x).
    if (ctxs[i].p == 0.0)
      continue;
    long double *sums;
    if ((sums = malloc(ctxs[i].n_events * sizeof(long double))) == NULL) {
      perror("malloc() error in maxent_lbfgs_evaluate()");
      exit(1);
    }
    long double z = 0.0;
    maxent_context_sums(&ctxs[i], x, sums, &z, d->model->f_restrict);
    // Cannot normalize over zero.
    assert(z != 0.0);
    dataset_event_t *evts = ctxs[i].events;
    for (int j = 0; j < ctxs[i].n_events; ++j) {
      // p(y|x)
      double p_yx = sums[j] / z;
      // Update log-likelihood of the model.
      ctxLl += evts[j].p * log(p_yx);
      // Contribution of this context to p(f).
      feature_value_t *fvals = evts[j].fvals;
      for (int k = 0; k < evts[j].n_fvals; ++k)
        if (d->model->f_restrict == 0 ||
            bitvector_get(d->model->f_restrict, fvals[k].feature)) {
          g[fvals[k].feature] += ctxs[i].p * p_yx * fvals[k].value;
        }
    }
    /* NOTE(review): there is no enclosing omp parallel region in this file,
     * so this atomic is a no-op today -- it looks like a leftover from (or
     * preparation for) parallelizing the context loop. If that loop is ever
     * parallelized, the g[] updates above would race as well and need
     * protection too. */
#pragma omp atomic
    ll += ctxLl;
    free(sums);
  }
  if (d->l2_sigma_sq != 0.0) {
    double sigma = 1.0 / d->l2_sigma_sq;   /* actually 1/sigma^2 */
    double f_sq_sum = 0.0;
    for (int i = 0; i < d->dataset->n_features; ++i) {
      if (d->model->f_restrict &&
          !bitvector_get(d->model->f_restrict, i))
        continue;
      f_sq_sum += pow(x[i], 2.0);
      // Impose Gaussian prior on gradient: g_i + x_i / sigma_sq
      g[i] += x[i] * sigma;
    }
    // Impose Gaussian prior on log-likelihood: ll - sum(x^2) / (2 * sigma_sq)
    ll -= f_sq_sum * 0.5 * sigma;
  }
  return -ll;
}
/* Runs a full L-BFGS optimization of the model parameters on the dataset,
 * with an optional Gaussian (L2) prior of variance l2_sigma_sq.
 * Returns the lbfgs() status code. */
int maxent_lbfgs_optimize(dataset_t *dataset, model_t *model,
    lbfgs_parameter_t *params, double l2_sigma_sq)
{
  maxent_lbfgs_data_t data = {l2_sigma_sq, dataset, model};
  return lbfgs(dataset->n_features, model->params, 0, maxent_lbfgs_evaluate,
      maxent_lbfgs_progress_verbose, &data, params);
}
/* Fills gradients[i] with d(-LL)/d(params[i]) over all features, ignoring
 * any feature restriction: -E_emp[f_i] + E_model[f_i]. Used by grafting to
 * score inactive features. gradients must hold dataset->n_features entries. */
void maxent_feature_gradients(dataset_t *dataset,
    lbfgsfloatval_t *params,
    lbfgsfloatval_t *gradients)
{
  /* Start from the negative empirical feature expectations. */
  for (int f = 0; f < dataset->n_features; ++f)
    gradients[f] = -dataset->feature_values[f];

  /* Add the model expectations, accumulated context by context. */
  for (int c = 0; c < dataset->n_contexts; ++c) {
    dataset_context_t *ctx = &dataset->contexts[c];
    if (ctx->p == 0.0)   /* zero-probability contexts contribute nothing */
      continue;

    long double *sums = malloc(ctx->n_events * sizeof(long double));
    if (sums == NULL) {
      perror("malloc() error in maxent_feature_gradients()");
      exit(1);
    }

    long double z = 0.0;
    maxent_context_sums(ctx, params, sums, &z, 0);

    for (int ev = 0; ev < ctx->n_events; ++ev) {
      double p_yx = sums[ev] / z;   /* p(y|x) */
      feature_value_t *fvals = ctx->events[ev].fvals;
      for (int f = 0; f < ctx->events[ev].n_fvals; ++f)
        gradients[fvals[f].feature] += ctx->p * p_yx * fvals[f].value;
    }

    free(sums);
  }
}
int feature_score_compare(void const *l, void const *r)
{
feature_score_t *lfs = (feature_score_t *) l;
feature_score_t *rfs = (feature_score_t *) r;
double l_score = fabs(lfs->score);
double r_score = fabs(rfs->score);
if (l_score > r_score)
return -1;
else if (l_score < r_score)
return 1;
else
return 0;
}
/* Grafting feature selection: among the features not yet active in
 * model->f_restrict, activates up to n_select with the largest |gradient|,
 * stopping early once |gradient| <= params->orthantwise_c (the L1 penalty
 * such features could not overcome). Returns the number activated. */
int maxent_select_features(dataset_t *dataset, lbfgs_parameter_t *params,
    model_t *model, double *gradients, int n_select)
{
  /* f_restrict->on = number of currently active features. */
  int n_unselected = dataset->n_features - model->f_restrict->on;
  feature_score_t *scores;
  if ((scores = malloc(sizeof(feature_score_t) * n_unselected)) == NULL) {
    perror("malloc() error in maxent_select_features()");
    exit(1);
  }
  //memset(scores, 0, sizeof(feature_score_t *) * n_unselected);
  /* Collect a (feature, gradient) pair for every inactive feature. */
  int i;
  int cand;
  for (i = 0, cand = 0; i < dataset->n_features; ++i)
    if (!bitvector_get(model->f_restrict, i)) {
      scores[cand].feature = i;
      scores[cand].score = gradients[i];
      ++cand;
    }
  assert(cand == n_unselected);
  /* Sort candidates by descending |gradient|. */
  qsort(scores, n_unselected, sizeof(feature_score_t),
      feature_score_compare);
  // Pick features with the highest gradient.
  int cur_select = 0;
  for (int i = 0; i < n_unselected && cur_select < n_select; ++i) {
    feature_score_t score = scores[i];
    if (fabs(score.score) <= params->orthantwise_c)
      break;
    fprintf(stderr, "* %d\n", score.feature);
    bitvector_set(model->f_restrict, score.feature, 1);
    ++cur_select;
  }
  // Free up scores.
  free(scores);
  return cur_select;
}
/* Grafting-light: alternates one feature-selection pass with exactly ONE
 * L-BFGS iteration, looping until lbfgs() stops for any reason other than
 * hitting the one-iteration cap. Returns the final lbfgs() status.
 * NOTE(review): this permanently sets params->max_iterations = 1 on the
 * caller's parameter struct and never restores it -- confirm callers expect
 * that. Unlike maxent_lbfgs_grafting(), the loop has no "no new features
 * selected" exit; it relies on lbfgs() converging. */
int maxent_lbfgs_grafting_light(dataset_t *dataset, model_t *model,
    lbfgs_parameter_t *params, double l2_sigma_sq, int grafting_n)
{
  params->max_iterations = 1;
  lbfgsfloatval_t *g;
  if ((g = lbfgs_malloc(dataset->n_features)) == NULL) {
    perror("malloc() error in maxent_lbfgs_grafting_light()");
    exit(1);
  }
  maxent_lbfgs_data_t lbfgs_data = {l2_sigma_sq, dataset, model};
  int r = LBFGS_SUCCESS;
  while (1) {
    fprintf(stderr, "--- Feature selection ---\n");
    // Calculate feature gradients.
    maxent_feature_gradients(dataset, model->params, g);
    // Select most promising features.
    (void) maxent_select_features(dataset, params, model, g,
        grafting_n);
    fprintf(stderr, "--- Optimizing model ---\n");
    r = lbfgs(dataset->n_features, model->params, 0, maxent_lbfgs_evaluate,
        maxent_lbfgs_progress_verbose, &lbfgs_data, params);
    /* Keep grafting only while lbfgs() stopped because of the 1-iteration cap. */
    if (r != LBFGSERR_MAXIMUMITERATION)
      break;
  }
  lbfgs_free(g);
  return r;
}
/* Grafting: alternates feature selection with a FULL L-BFGS optimization of
 * the active feature set, until no new feature is selected or lbfgs() fails.
 * Returns the final lbfgs() status (LBFGS_SUCCESS when selection exhausts
 * the candidates before any optimizer error). */
int maxent_lbfgs_grafting(dataset_t *dataset, model_t *model,
    lbfgs_parameter_t *params, double l2_sigma_sq, int grafting_n)
{
  lbfgsfloatval_t *g;
  if ((g = lbfgs_malloc(dataset->n_features)) == NULL) {
    perror("malloc() error in maxent_lbfgs_grafting()");
    exit(1);
  }
  maxent_lbfgs_data_t lbfgs_data = {l2_sigma_sq, dataset, model};
  int r = LBFGS_SUCCESS;
  while (1) {
    fprintf(stderr, "--- Feature selection ---\n");
    // Calculate feature gradients.
    maxent_feature_gradients(dataset, model->params, g);
    // Select most promising features.
    int n_selected = maxent_select_features(dataset, params, model, g,
        grafting_n);
    // No features...
    if (n_selected == 0) {
      fprintf(stderr, "done!\n");
      break;
    }
    fprintf(stderr, "--- Optimizing model ---\n");
    /* BUG FIX: this was 'int r = lbfgs(...)', shadowing the outer r, so the
     * function always returned LBFGS_SUCCESS even when lbfgs() failed and
     * the loop broke on that failure. Assign to the outer r instead. */
    r = lbfgs(dataset->n_features, model->params, 0, maxent_lbfgs_evaluate,
        maxent_lbfgs_progress_verbose, &lbfgs_data, params);
    if (r != LBFGS_STOP && r != LBFGS_SUCCESS && r != LBFGS_ALREADY_MINIMIZED)
      break;
  }
  lbfgs_free(g);
  return r;
}
/* L-BFGS progress callback: logs iteration number, objective value and the
 * parameter/gradient norms to stderr, one tab-separated line per iteration. */
int maxent_lbfgs_progress_verbose(void *instance, lbfgsfloatval_t const *x,
    lbfgsfloatval_t const *g, lbfgsfloatval_t const fx,
    lbfgsfloatval_t const xnorm, lbfgsfloatval_t const gnorm,
    lbfgsfloatval_t const step, int n, int k, int ls)
{
  /* Only a few of the callback arguments are reported. */
  (void) instance; (void) x; (void) g; (void) step; (void) n; (void) ls;

  fprintf(stderr, "%d\t%.4e\t%.4e\t%.4e\n", k, fx, xnorm, gnorm);
  return 0;   /* zero tells lbfgs() to keep iterating */
}
|
ten_tusscher_3_two_region.c | #include "model_common.h"
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_3_two_region.h"
#define ENDO
// Reports this model's static data through the generic cell_model descriptor.
// The parameter list comes from the GET_CELL_MODEL_DATA macro (declared in
// model_common.h); presumably it supplies 'cell_model' plus the
// 'get_initial_v'/'get_neq' request flags -- confirm against that header.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;              // resting membrane potential
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;      // state-vector length
}
// Initializes one cell's 12-entry state vector sv.
// Without extra_data, hard-coded healthy-cell defaults are used. With
// extra_data, per-region initial conditions are read from it; judging by the
// offsets used here and in solve_model_odes_cpu(), the layout appears to be:
// 7 region-1 parameters, 12 region-1 initial conditions (+7), 7 region-2
// parameters, 12 region-2 initial conditions (+26), then one fibrosis value
// per cell (+38) -- TODO confirm against the code that builds extra_data.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    if (extra_data == NULL)
    {
        sv[0] = -86.2f; // V; millivolt
        sv[1] = 0.0f; //M
        sv[2] = 0.75; //H
        sv[3] = 0.75; //J
        sv[4] = 0.0f; //Xr1
        sv[5] = 0.0f; //Xs
        sv[6] = 1.0f; //S
        sv[7] = 1.0f; //F
        sv[8] = 1.0f; //F2
        sv[9] = 0.0; //D_INF
        sv[10] = 0.0; //R_INF
        sv[11] = 0.0; //Xr2_INF
    }
    else
    {
        //region 1 = Healthy // region 2 = fibro or border
        real *initial_conditions_1 = ((real*)extra_data) + 7; //pointer
        real *initial_conditions_2 = ((real*)extra_data) + 26;
        real *fibrosis = ((real*)extra_data) + 7 + 31; //pointer
        // fibrosis == 1.0 marks a healthy (region-1) cell; anything else
        // gets the region-2 (fibrotic / border-zone) initial conditions.
        if (fibrosis[sv_id] == 1.0)
        {
            sv[0] = initial_conditions_1[0]; // V; millivolt
            sv[1] = initial_conditions_1[1]; //M
            sv[2] = initial_conditions_1[2]; //H
            sv[3] = initial_conditions_1[3]; //J
            sv[4] = initial_conditions_1[4]; //Xr1
            sv[5] = initial_conditions_1[5]; //Xs
            sv[6] = initial_conditions_1[6]; //S
            sv[7] = initial_conditions_1[7]; //F
            sv[8] = initial_conditions_1[8]; //F2
            sv[9] = initial_conditions_1[9]; //D_INF
            sv[10] = initial_conditions_1[10]; //R_INF
            sv[11] = initial_conditions_1[11]; //Xr2_INF
        }
        else
        {
            sv[0] = initial_conditions_2[0]; // V; millivolt
            sv[1] = initial_conditions_2[1]; //M
            sv[2] = initial_conditions_2[2]; //H
            sv[3] = initial_conditions_2[3]; //J
            sv[4] = initial_conditions_2[4]; //Xr1
            sv[5] = initial_conditions_2[5]; //Xs
            sv[6] = initial_conditions_2[6]; //S
            sv[7] = initial_conditions_2[7]; //F
            sv[8] = initial_conditions_2[8]; //F2
            sv[9] = initial_conditions_2[9]; //D_INF
            sv[10] = initial_conditions_2[10]; //R_INF
            sv[11] = initial_conditions_2[11]; //Xr2_INF
        }
    }
}
// Advances num_steps ODE steps for every requested cell, parallelized over
// cells. When the caller supplies no extra_data, a default parameter block
// describing a healthy cell is built locally and released before returning.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    real *fibrosis;
    int owns_defaults = 0;   // nonzero when extra_data/fibrosis were allocated here

    // Default values for a healthy cell ///////////
    real atpi = 6.8f;
    real Ko = 5.4f;
    real Ki = 138.3f;
    real Vm_change = 0.0;
    real GNa_multiplicator = 1.0f;
    real GCa_multiplicator = 1.0f;
    real INaCa_multiplicator = 1.0f;
    real V_0 = -86.2f; // V; millivolt
    real M_0 = 0.0f; //M
    real H_0 = 0.75; //H
    real J_0 = 0.75; //J
    real Xr1_0 = 0.0f; //Xr1
    real Xs_0 = 0.0f; //Xs
    real S_0 = 1.0f; //S
    real F_0 = 1.0f; //F
    real F2_0 = 1.0f; //F2
    real D_inf_0 = 0.0; //D_INF
    real R_inf_0 = 0.0; //R_INF
    real Xr2_inf_0 = 0.0; //Xr2_INF
    ////////////////////////////////////

    int num_extra_parameters = 7;
    int num_initial_conditions = 12;
    size_t extra_parameters_size = (num_extra_parameters+num_initial_conditions)*sizeof(real);

    if(extra_data)
    {
        // Caller layout: 7 region-1 params + 12 region-1 ICs + 7 region-2
        // params + 12 region-2 ICs, then one fibrosis value per cell.
        fibrosis = ((real*)extra_data) + num_extra_parameters + num_extra_parameters + num_initial_conditions + num_initial_conditions; //pointer
    }
    else
    {
        owns_defaults = 1;
        extra_data = malloc(extra_parameters_size);
        ((real*)extra_data)[0] = atpi;
        ((real*)extra_data)[1] = Ko;
        ((real*)extra_data)[2] = Ki;
        ((real*)extra_data)[3] = Vm_change;
        ((real*)extra_data)[4] = GNa_multiplicator;
        ((real*)extra_data)[5] = GCa_multiplicator;
        ((real*)extra_data)[6] = INaCa_multiplicator;
        ((real*)extra_data)[7] = V_0;
        ((real*)extra_data)[8] = M_0;
        ((real*)extra_data)[9] = H_0;
        ((real*)extra_data)[10] = J_0;
        ((real*)extra_data)[11] = Xr1_0;
        ((real*)extra_data)[12] = Xs_0;
        ((real*)extra_data)[13] = S_0;
        ((real*)extra_data)[14] = F_0;
        ((real*)extra_data)[15] = F2_0;
        ((real*)extra_data)[16] = D_inf_0;
        ((real*)extra_data)[17] = R_inf_0;
        ((real*)extra_data)[18] = Xr2_inf_0;
        // BUG FIX: the defaults were calloc'ed to 0.0 ("fibrotic"), which made
        // RHS_cpu take its fibrotic branch and read extra_parameters[19..25] --
        // past the end of the 19-real default block above. The defaults above
        // describe a healthy cell, so mark every cell healthy (fibrosis = 1).
        fibrosis = malloc(num_cells_to_solve * sizeof(real));
        for (int c = 0; c < num_cells_to_solve; c++)
            fibrosis[c] = 1.0;
    }

    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        // cells_to_solve == NULL means "solve all cells in order".
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j)
        {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i], fibrosis[i], extra_data);
        }
    }

    // BUG FIX: the original checked 'extra_data == NULL' *after* assigning the
    // local allocation to it, so neither buffer was ever freed (memory leak on
    // every call without caller-supplied extra_data).
    if(owns_defaults) {
        free(fibrosis);
        free(extra_data);
    }
}
// Advances one cell's state vector sv by a single time step dt.
// The membrane potential (sv[0]) is integrated with an explicit Euler step;
// the remaining entries come back from RHS_cpu already advanced (Rush-Larsen
// style updates), so they are copied through unchanged.
void solve_model_ode_cpu(real dt, real *sv, real stim_current, real fibrosis, real *extra_parameters) {
    assert(sv);

    real state[NEQ], deriv[NEQ];
    for (int k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu(state, deriv, stim_current, dt, fibrosis, extra_parameters);

    // Explicit Euler update for the voltage only.
    sv[0] = state[0] + dt*deriv[0];
    // Entries 1..NEQ-1 hold the new gate values themselves.
    for (int k = 1; k < NEQ; k++)
        sv[k] = deriv[k];
}
// Right-hand side of the reduced ten Tusscher ventricular cell model for one
// cell, with ischemia/fibrosis modifications.
//   sv               : current 12-entry state vector
//   rDY_             : output; rDY_[0] is dV/dt, rDY_[1..11] are the ALREADY
//                      ADVANCED gate values (Rush-Larsen style; see the
//                      exp(-dt/TAU) updates at the bottom)
//   stim_current     : external stimulus added to the total ionic current
//   dt               : time step, needed for the Rush-Larsen gate updates
//   fibrosis         : 1 = healthy, 0 = fibrotic, in between = border zone
//   extra_parameters : region-1 params at [0..6], region-2 at [19..25]
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt, real fibrosis, real *extra_parameters) {
    //fibrosis = 0 means that the cell is fibrotic, 1 is not fibrotic. Anything between 0 and 1 means border zone
    //THIS IS THE STATE VECTOR THAT WE NEED TO SAVE IN THE STEADY STATE
    const real svolt = sv[0];       // membrane potential (mV)
    const real sm   = sv[1];        // INa activation gate
    const real sh   = sv[2];        // INa fast inactivation gate
    const real sj   = sv[3];        // INa slow inactivation gate
    const real sxr1 = sv[4];        // IKr activation gate
    const real sxs  = sv[5];        // IKs activation gate
    const real ss   = sv[6];        // Ito inactivation gate
    const real sf   = sv[7];        // ICaL inactivation gate f
    const real sf2  = sv[8];        // ICaL inactivation gate f2
    const real D_INF   = sv[9];     // steady-state d gate (reduced model)
    const real R_INF   = sv[10];    // steady-state r gate (reduced model)
    const real Xr2_INF = sv[11];    // steady-state Xr2 gate (reduced model)

    const real natp = 0.24;          // K dependence of ATP-sensitive K current
    const real nicholsarea = 0.00005; // Nichol's areas (cm^2)
    const real hatp = 2;             // Hill coefficient

    // Here we set the parameters based on the type of cell in region 1 or region 2...
    real atpi;
    real Ko;
    real Ki;
    real Vm_modifier;
    real GNa_multplicator;
    real GCaL_multplicator;
    real INaCa_multplicator;
    //Healthy cell
    if(fibrosis==1)
    {
        atpi = extra_parameters[0];
        Ko = extra_parameters[1];
        Ki = extra_parameters[2];
        Vm_modifier = extra_parameters[3];
        GNa_multplicator = extra_parameters[4];
        GCaL_multplicator = extra_parameters[5];
        INaCa_multplicator = extra_parameters[6];
    }
    //Fibrotic or BorderZone cell (region-2 parameter block)
    else
    {
        atpi = extra_parameters[19];
        Ko = extra_parameters[20];
        Ki = extra_parameters[21];
        Vm_modifier = extra_parameters[22];
        GNa_multplicator = extra_parameters[23];
        GCaL_multplicator = extra_parameters[24];
        INaCa_multplicator = extra_parameters[25];
    }

    //Linear changing of atpi depending on the fibrosis and distance from the center of the scar (only for border zone cells)
    // For each quantity below: fibrosis==1 restores the healthy default,
    // fibrosis==0 keeps the region-2 value, in between interpolates linearly.
    real atpi_change = 6.8f - atpi;
    atpi = atpi + atpi_change*fibrosis;

    //Extracellular potassium concentration was elevated
    //from its default value of 5.4 mM to values between 6.0 and 8.0 mM
    //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischemia and Re-entry
    real Ko_change = 5.4f - Ko;
    Ko = Ko + Ko_change*fibrosis;

    real Ki_change = 138.3 - Ki;
    Ki = Ki + Ki_change*fibrosis;

    Vm_modifier = Vm_modifier - Vm_modifier*fibrosis;

    real GNa_multplicator_change = 1.0f - GNa_multplicator;
    GNa_multplicator = GNa_multplicator + GNa_multplicator_change*fibrosis;

    real GCaL_multplicator_change = 1.0f - GCaL_multplicator;
    GCaL_multplicator = GCaL_multplicator + GCaL_multplicator_change*fibrosis;

    real INaCa_multplicator_change = 1.0f - INaCa_multplicator;
    INaCa_multplicator = INaCa_multplicator + INaCa_multplicator_change*fibrosis;

    // ATP-sensitive K+ current conductance, scaled by intracellular ATP.
    //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischaemia and Re-entry
    const real katp = -0.0942857142857*atpi + 0.683142857143;
    const real patp =  1/(1 + pow((atpi/katp),hatp));
    const real gkatp    =  0.000195/nicholsarea;
    const real gkbaratp =  gkatp*patp*pow((Ko/5.4),natp);

    // ATP scaling of the L-type Ca current.
    const real katp2= 1.4;
    const real hatp2 = 2.6;
    const real pcal = 1.0/(1.0 + pow((katp2/atpi),hatp2));

    // Fixed ionic concentrations (mM).
    const real Cao=2.0;
    const real Nao=140.0;
    const real Cai=0.00007;
    const real Nai=7.67;

    //Constants
    const real R=8314.472;      // gas constant
    const real F=96485.3415;    // Faraday constant
    const real T=310.0;         // temperature (K)
    const real RTONF=(R*T)/F;

    //Parameters for currents
    //Parameters for IKr
    const real Gkr=0.101;
    //Parameters for Iks
    const real pKNa=0.03;
    // Cell-type dependent conductances; exactly one of EPI/ENDO/MCELL is
    // defined (ENDO at the top of this file).
    #ifdef EPI
    const real Gks=0.257;
    #endif
    #ifdef ENDO
    const real Gks=0.392;
    #endif
    #ifdef MCELL
    const real Gks=0.098;
    #endif
    //Parameters for Ik1
    const real GK1=5.405;
    //Parameters for Ito
    #ifdef EPI
    const real Gto=0.294;
    #endif
    #ifdef ENDO
    const real Gto=0.073;
    #endif
    #ifdef MCELL
    const real Gto=0.294;
    #endif
    //Parameters for INa
    const real GNa=14.838*GNa_multplicator; //ACIDOSIS
    //Parameters for IbNa
    const real GbNa=0.00029;
    //Parameters for INaK
    const real KmK=1.0;
    const real KmNa=40.0;
    const real knak=2.724;
    //Parameters for ICaL
    const real GCaL=0.2786*pcal*GCaL_multplicator; //ACIDOSIS
    //Parameters for IbCa
    const real GbCa=0.000592;
    //Parameters for INaCa
    const real knaca=1000;
    const real KmNai=87.5;
    const real KmCa=1.38;
    const real ksat=0.1;
    const real n=0.35;
    //Parameters for IpCa
    const real GpCa=0.1238;
    const real KpCa=0.0005;
    //Parameters for IpK;
    const real GpK=0.0293;

    // Nernst/reversal potentials.
    const real Ek=RTONF*(log((Ko/Ki)));
    const real Ena=RTONF*(log((Nao/Nai)));
    const real Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    const real Eca=0.5*RTONF*(log((Cao/Cai)));

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real IKatp;

    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;

    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;

    real axr1;
    real bxr1;
    real Xr1_INF;
    real Xr2_INF_new;
    real TAU_Xr1;

    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;

    real R_INF_new;
    real S_INF;
    real TAU_S;

    real Af;
    real Bf;
    real Cf;
    real Af2;
    real Bf2;
    real Cf2;
    real D_INF_new;
    real TAU_F;
    real F_INF;
    real TAU_F2;
    real F2_INF;
    real sItot;

    //Needed to compute currents
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*((svolt-Vm_modifier)-Ena); //ACIDOSIS
    ICaL=GCaL*D_INF*sf*sf2*((svolt-Vm_modifier)-60); //ACIDOSIS
    Ito=Gto*R_INF*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*Xr2_INF*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaCa = INaCa*INaCa_multplicator; //ACIDOSIS
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    IKatp = gkbaratp*(svolt-Ek);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              IKatp +
              stim_current;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h gate: different rate formulation above/below -40 mV.
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    // j gate: same voltage split as the h gate.
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF_new=1./(1.+exp((svolt-(-88.))/24.));

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=(1400./(sqrt(1.+exp((5.-svolt)/6))));
    Bxs=(1./(1.+exp((svolt-35.)/15.)));
    TAU_Xs=Axs*Bxs+80;

    // Ito gates: cell-type specific steady states and time constants.
    #ifdef EPI
    R_INF_new=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    #endif
    #ifdef ENDO
    R_INF_new=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
    #endif
    #ifdef MCELL
    R_INF_new=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    #endif

    D_INF_new=1./(1.+exp((-8-svolt)/7.5));
    F_INF=1./(1.+exp((svolt+20)/7));
    Af=1102.5*exp(-(svolt+27)*(svolt+27)/225);
    Bf=200./(1+exp((13-svolt)/10.));
    Cf=(180./(1+exp((svolt+30)/10)))+20;
    TAU_F=Af+Bf+Cf;
    F2_INF=0.67/(1.+exp((svolt+35)/7))+0.33;
    Af2=600*exp(-(svolt+27)*(svolt+27)/170);
    Bf2=7.75/(1.+exp((25-svolt)/10));
    Cf2=16/(1.+exp((svolt+30)/10));
    TAU_F2=Af2+Bf2+Cf2;

    //update voltage: only rDY_[0] is a true derivative (integrated by the caller)
    rDY_[0] = -sItot;

    //Update gates: Rush-Larsen exponential updates -- these are the NEW gate
    //values after dt, not derivatives.
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[6]= S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[7] =F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    rDY_[8] =F2_INF-(F2_INF-sf2)*exp(-dt/TAU_F2);

    // Reduced-model gates: replaced directly by their steady-state values.
    rDY_[9] = D_INF_new;
    rDY_[10] = R_INF_new;
    rDY_[11] = Xr2_INF_new;
}
|
pooling_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// 2x2 max pooling with stride 2, one output per non-overlapping 2x2 input
// window. Channels are processed independently and in parallel via OpenMP;
// the inner loop is NEON-accelerated (inline asm) on ARM, with a scalar
// fallback/tail loop for the remaining outputs.
// NOTE(review): assumes top_blob is already allocated with
// outw = w/2, outh = h/2 by the caller — confirm against the calling Pooling
// layer, as no bounds are checked here.
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;      // input width
    int inch = bottom_blob.c;   // number of channels
    int outw = top_blob.w;      // output width
    int outh = top_blob.h;      // output height
    // After a row of outputs, r0/r1 have advanced by 2*outw floats. Adding
    // tailstep (= 2*w - 2*outw) moves them down past the two consumed input
    // rows to the start of the next 2x2 row pair, and also skips a trailing
    // odd input column when w is odd.
    const int tailstep = w - 2*outw + w;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);
        // r0/r1 walk two adjacent input rows in lockstep (the 2x2 window's
        // top and bottom rows).
        const float* r0 = img0;
        const float* r1 = img0 + w;
        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            // nn: number of 4-output NEON iterations (8 input floats per row
            // each); remain: leftover outputs handled by the scalar loop.
            int nn = outw >> 2;
            int remain = outw - (nn << 2);
#else
            int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                // Per iteration: load 8 floats from each row, take the
                // vertical max (fmax), then the horizontal pairwise max
                // (fmaxp) to produce 4 pooled outputs.
                asm volatile(
                    "0:                                   \n"
                    "prfm       pldl1keep, [%1, #256]     \n"  // prefetch r0
                    "prfm       pldl1keep, [%2, #256]     \n"  // prefetch r1
                    "ld1        {v0.4s, v1.4s}, [%1], #32 \n"  // 8 floats from r0
                    "ld1        {v2.4s, v3.4s}, [%2], #32 \n"  // 8 floats from r1
                    "fmax       v0.4s, v0.4s, v2.4s       \n"  // max of the two rows
                    "fmax       v1.4s, v1.4s, v3.4s       \n"
                    "fmaxp      v2.4s, v0.4s, v1.4s       \n"  // pairwise max -> 4 outputs
                    "subs       %w0, %w0, #1              \n"
                    "st1        {v2.4s}, [%3], #16        \n"
                    "bne        0b                        \n"
                    : "=r"(nn),     // %0
                      "=r"(r0),     // %1
                      "=r"(r1),     // %2
                      "=r"(outptr)  // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr)
                    : "cc", "memory", "v0", "v1", "v2", "v3"
                );
            }
#else
            if (nn > 0)
            {
                // ARMv7 variant of the same scheme: vmax for the vertical
                // max, vpmax for the horizontal pairwise max.
                asm volatile(
                    "0:                             \n"
                    "pld        [%1, #256]          \n"  // prefetch r0
                    "pld        [%2, #256]          \n"  // prefetch r1
                    "vld1.f32   {d0-d3}, [%1]!      \n"  // 8 floats from r0
                    "vld1.f32   {d4-d7}, [%2]!      \n"  // 8 floats from r1
                    "vmax.f32   q0, q0, q2          \n"  // max of the two rows
                    "vmax.f32   q1, q1, q3          \n"
                    "vpmax.f32  d4, d0, d1          \n"  // pairwise max -> 4 outputs
                    "vpmax.f32  d5, d2, d3          \n"
                    "subs       %0, #1              \n"
                    "vst1.f32   {d4-d5}, [%3]!      \n"
                    "bne        0b                  \n"
                    : "=r"(nn),     // %0
                      "=r"(r0),     // %1
                      "=r"(r1),     // %2
                      "=r"(outptr)  // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr)
                    : "cc", "memory", "q0", "q1", "q2", "q3"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: one output per iteration, max of the 2x2 window.
            // The asm blocks above already advanced r0/r1/outptr past the
            // vectorized portion via their output operands.
            for (; remain>0; remain--)
            {
                float max0 = std::max(r0[0], r0[1]);
                float max1 = std::max(r1[0], r1[1]);
                *outptr = std::max(max0, max1);
                r0 += 2;
                r1 += 2;
                outptr++;
            }
            // Jump both row pointers to the next pair of input rows.
            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
|
Tutorial.h | //=================================================================================================
/*!
// \file blaze/Tutorial.h
// \brief Tutorial of the Blaze library
//
// Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_TUTORIAL_H_
#define _BLAZE_TUTORIAL_H_
//=================================================================================================
//
// BLAZE TUTORIAL
//
//=================================================================================================
//**Mainpage***************************************************************************************
/*!\mainpage
//
// \image html blaze300x150.jpg
//
// This is the API for the \b Blaze high performance C++ math library. It gives a complete
// overview of the individual features and sublibraries of \b Blaze. To get a first impression
// on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards,
// the following long tutorial covers the most important aspects of the \b Blaze math library.
// The tabs at the top of the page allow a direct access to the individual modules, namespaces,
// classes, and files of the \b Blaze library.\n\n
//
// \section table_of_content Table of Contents
//
// <ul>
// <li> \ref configuration_and_installation </li>
// <li> \ref getting_started </li>
// <li> \ref vectors
// <ul>
// <li> \ref vector_types </li>
// <li> \ref vector_operations </li>
// </ul>
// </li>
// <li> \ref matrices
// <ul>
// <li> \ref matrix_types </li>
// <li> \ref matrix_operations </li>
// </ul>
// </li>
// <li> \ref adaptors
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices </li>
// </ul>
// </li>
// <li> \ref views
// <ul>
// <li> \ref views_subvectors </li>
// <li> \ref views_element_selections </li>
// <li> \ref views_submatrices </li>
// <li> \ref views_rows </li>
// <li> \ref views_row_selections </li>
// <li> \ref views_columns </li>
// <li> \ref views_column_selections </li>
// <li> \ref views_bands </li>
// </ul>
// </li>
// <li> \ref arithmetic_operations
// <ul>
// <li> \ref addition </li>
// <li> \ref subtraction </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// </ul>
// </li>
// <li> \ref vector_vector_division </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication
// <ul>
// <li> \ref schur_product </li>
// <li> \ref matrix_product </li>
// </ul>
// </li>
// </ul>
// </li>
// <li> \ref shared_memory_parallelization
// <ul>
// <li> \ref openmp_parallelization </li>
// <li> \ref cpp_threads_parallelization </li>
// <li> \ref boost_threads_parallelization </li>
// <li> \ref hpx_parallelization </li>
// <li> \ref serial_execution </li>
// </ul>
// </li>
// <li> \ref serialization
// <ul>
// <li> \ref vector_serialization </li>
// <li> \ref matrix_serialization </li>
// </ul>
// </li>
// <li> \ref customization
// <ul>
// <li> \ref configuration_files </li>
// <li> \ref vector_and_matrix_customization
// <ul>
// <li> \ref custom_data_members </li>
// <li> \ref custom_operations </li>
// <li> \ref custom_data_types </li>
// </ul>
// </li>
// <li> \ref error_reporting_customization </li>
// </ul>
// </li>
// <li> \ref blas_functions </li>
// <li> \ref lapack_functions </li>
// <li> \ref block_vectors_and_matrices </li>
// <li> \ref intra_statement_optimization </li>
// </ul>
*/
//*************************************************************************************************
//**Configuration and Installation*****************************************************************
/*!\page configuration_and_installation Configuration and Installation
//
// \tableofcontents
//
//
// Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system
// is a fairly easy two step process. In the following, this two step process is explained in
// detail, preceded only by a short summary of the requirements.
//
//
// \n \section requirements Requirements
// <hr>
//
// For maximum performance the \b Blaze library expects you to have a BLAS library installed
// (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>,
// <a href="http://developer.amd.com/libraries/acml/">ACML</a>,
// <a href="http://math-atlas.sourceforge.net">Atlas</a>,
// <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't
// have a BLAS library installed on your system, \b Blaze will still work and will not be reduced
// in functionality, but performance may be limited. Thus it is strongly recommended to install a
// BLAS library.
//
// Additionally, for computing the determinant of a dense matrix, for the decomposition of dense
// matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular
// values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either
// of these features is used it is necessary to link the LAPACK library to the final executable.
// If no LAPACK library is available the use of these features will result in a linker error.
//
// Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this
// case the Boost library is required to be installed on your system. It is recommended to use the
// newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If
// you don't have Boost installed on your system, you can download it for free from
// <a href="http://www.boost.org">www.boost.org</a>.
//
//
// \n \section step_1_installation Step 1: Installation
// <hr>
//
// \subsection step_1_cmake Installation via CMake
//
// The first step is the installation of the \b Blaze header files. The most convenient way
// to do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS users can use the
// following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to
// the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to
// \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake.
\code
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/
sudo make install
\endcode
// Windows users can do the same via the cmake-gui. Alternatively, it is possible to include
// \b Blaze by adding the following lines in any \c CMakeLists.txt file:
\code
find_package( blaze )
if( blaze_FOUND )
add_library( blaze_target INTERFACE )
target_link_libraries( blaze_target INTERFACE blaze::blaze )
endif()
\endcode
// \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool
//
// An alternate way to install \b Blaze for Windows users is Microsoft's
// <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool (vcpkg)</a>. \b Blaze can
// be installed via the command line:
\code
C:\src\vcpkg> .\vcpkg install blaze
\endcode
// The tool automatically downloads the latest \b Blaze release and copies the header files to
// the common include directory.
//
// \n \subsection step_1_installation_unix Manual Installation on Linux/macOS
//
// Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply
// copied to a standard include directory (note that this requires root privileges):
\code
cp -r ./blaze /usr/local/include
\endcode
// Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the
// \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be
// searched after any directories specified on the command line with the option \c -I and
// before the standard default directories (such as \c /usr/local/include and \c /usr/include).
// Assuming a user named 'Jon', the environment variable can be set as follows:
\code
CPLUS_INCLUDE_PATH=/usr/home/jon/blaze
export CPLUS_INCLUDE_PATH
\endcode
// Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the
// command line. The following example demonstrates this by means of the GNU C++ compiler:
\code
g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp
\endcode
// \n \subsection step_1_installation_windows Manual Installation on Windows
//
// Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be
// copied to any other directory or simply left in the default \b Blaze directory. However, the
// chosen include directory has to be explicitly specified as include path. In Visual Studio,
// this is done via the project property pages, configuration properties, C/C++, General settings.
// Here the additional include directories can be specified.
//
//
// \n \section step_2_configuration Step 2: Configuration
// <hr>
//
// The second step is the configuration and customization of the \b Blaze library. Many aspects
// of \b Blaze can be adapted to specific requirements, environments and architectures. The most
// convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt>
// subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header
// files can be customized manually. In both cases, however, the files are modified. If this is
// not an option it is possible to configure \b Blaze via the command line (see the tutorial
// section \ref configuration_files or the documentation in the configuration files).
//
// Since the default settings are reasonable for most systems this step can also be skipped.
// However, in order to achieve maximum performance a customization of at least the following
// configuration files is required:
//
// - <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b Blaze can be enabled
// to use a third-party BLAS library for several basic linear algebra functions (such as for
// instance dense matrix multiplications). In case no BLAS library is used, all linear algebra
// functions use the default implementations of the \b Blaze library and therefore BLAS is not a
// requirement for the compilation process. However, please note that performance may be limited.
// - <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the hardware specific cache
// settings. \b Blaze uses this information to optimize its cache usage. For maximum performance
// it is recommended to adapt these settings to a specific target architecture.
// - <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all thresholds for the
// customization of the \b Blaze compute kernels. In order to tune the kernels for a specific
// architecture and to maximize performance it can be necessary to adjust the thresholds,
// especially for a parallel execution (see \ref shared_memory_parallelization).
//
// For an overview of other customization options and more details, please see the section
// \ref configuration_files.
//
//
// \n \section blaze_version Blaze Version
// <hr>
//
// The current major and minor version number of the \b Blaze library can be found in the
// <b><tt><blaze/system/Version.h></tt></b> header file. It is automatically included via the
// <b><tt><blaze/Blaze.h></tt></b> header file. The file contains the two following macros,
// which can for instance be used for conditional compilation:
\code
#define BLAZE_MAJOR_VERSION 3
#define BLAZE_MINOR_VERSION 2
\endcode
// \n Next: \ref getting_started
*/
//*************************************************************************************************
//**Getting Started********************************************************************************
/*!\page getting_started Getting Started
//
// This short tutorial serves the purpose to give a quick overview of the way mathematical
// expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following
// long tutorial covers the most important aspects of the \b Blaze math library.
//
//
// \n \section getting_started_vector_example A First Example
//
// \b Blaze is written such that using mathematical expressions is as close to mathematical
// textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly
// easiest solution is the right solution and most users experience no problems when trying to
// use \b Blaze in the most natural way. The following example gives a first impression of the
// formulation of a vector addition in \b Blaze:
\code
#include <iostream>
#include <blaze/Math.h>
using blaze::StaticVector;
using blaze::DynamicVector;
// Instantiation of a static 3D column vector. The vector is directly initialized as
// ( 4 -2 5 )
StaticVector<int,3UL> a{ 4, -2, 5 };
// Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to
// ( 2 5 -3 )
DynamicVector<int> b( 3UL );
b[0] = 2;
b[1] = 5;
b[2] = -3;
// Adding the vectors a and b
DynamicVector<int> c = a + b;
// Printing the result of the vector addition
std::cout << "c =\n" << c << "\n";
\endcode
// Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header
// file. Alternatively, the entire \b Blaze library, including both the math and the entire
// utility module, can be included via the \c blaze/Blaze.h header file. Also note that all
// classes and functions of \b Blaze are contained in the blaze namespace.\n\n
//
// Assuming that this program resides in a source file called \c FirstExample.cpp, it can be
// compiled for instance via the GNU C++ compiler:
\code
g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp
\endcode
// Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum
// performance, it is necessary to compile the program in release mode, which deactivates
// all debugging functionality inside \b Blaze. It is also strongly recommended to specify
// the available architecture specific instruction set (as for instance the AVX instruction
// set, which if available can be activated via the \c -mavx flag). This allows \b Blaze
// to optimize computations via vectorization.\n\n
//
// When running the resulting executable \c FirstExample, the output of the last line of
// this small program is
\code
c =
6
3
2
\endcode
// \n \section getting_started_matrix_example An Example Involving Matrices
//
// Similarly easy and intuitive are expressions involving matrices:
\code
#include <blaze/Math.h>
using namespace blaze;
// Instantiating a dynamic 3D column vector
DynamicVector<int> x{ 4, -1, 3 };
// Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call
// operator three values of the matrix are explicitly set to get the matrix
// ( 1 0 4 )
// ( 0 -2 0 )
DynamicMatrix<int> A( 2UL, 3UL, 0 );
A(0,0) = 1;
A(0,2) = 4;
A(1,1) = -2;
// Performing a matrix/vector multiplication
DynamicVector<int> y = A * x;
// Printing the resulting vector
std::cout << "y =\n" << y << "\n";
// Instantiating a static column-major matrix. The matrix is directly initialized as
// ( 3 -1 )
// ( 0 2 )
// ( -1 0 )
StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } };
// Performing a matrix/matrix multiplication
DynamicMatrix<int> C = A * B;
// Printing the resulting matrix
std::cout << "C =\n" << C << "\n";
\endcode
// The output of this program is
\code
y =
16
2
C =
( -1 -1 )
( 0 4 )
\endcode
// \n \section getting_started_complex_example A Complex Example
//
// The following example is much more sophisticated. It shows the implementation of the Conjugate
// Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the
// \b Blaze library:
//
// \image html cg.jpg
//
// In this example it is not important to understand the CG algorithm itself, but to see the
// advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a
// sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$
// unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical
// formulation and therefore has huge advantages in terms of readability and maintainability,
// while the performance of the code is close to the expected theoretical peak performance:
\code
const size_t NN( N*N );
blaze::CompressedMatrix<double,rowMajor> A( NN, NN );
blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN );
double alpha, beta, delta;
// ... Initializing the sparse matrix A
// Performing the CG algorithm
r = b - A * x;
p = r;
delta = (r,r);
for( size_t iteration=0UL; iteration<iterations; ++iteration )
{
Ap = A * p;
alpha = delta / (p,Ap);
x += alpha * p;
r -= alpha * Ap;
beta = (r,r);
if( std::sqrt( beta ) < 1E-8 ) break;
p = r + ( beta / delta ) * p;
delta = beta;
}
\endcode
// \n Hopefully this short tutorial gives a good first impression of how mathematical expressions
// are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types,
// will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and
// matrix types, all possible operations on vectors and matrices, and of course all possible
// mathematical expressions.
//
// \n Previous: \ref configuration_and_installation Next: \ref vectors
*/
//*************************************************************************************************
//**Vectors****************************************************************************************
/*!\page vectors Vectors
//
// \tableofcontents
//
//
// \n \section vectors_general General Concepts
// <hr>
//
// The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector,
// \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector)
// and one sparse vector type (\ref vector_types_compressed_vector). All vectors can be specified
// as either column vectors or row vectors:
\code
using blaze::DynamicVector;
using blaze::columnVector;
using blaze::rowVector;
// Setup of the 3-dimensional dense column vector
//
// ( 1 )
// ( 2 )
// ( 3 )
//
DynamicVector<int,columnVector> a{ 1, 2, 3 };
// Setup of the 3-dimensional dense row vector
//
// ( 4 5 6 )
//
DynamicVector<int,rowVector> b{ 4, 5, 6 };
\endcode
// Per default, all vectors in \b Blaze are column vectors:
\code
// Instantiation of a 3-dimensional column vector
blaze::DynamicVector<int> c( 3UL );
\endcode
// \n \section vectors_details Vector Details
// <hr>
//
// - \ref vector_types
// - \ref vector_operations
//
//
// \n \section vectors_examples Examples
// <hr>
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowVector;
using blaze::columnVector;
StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector
CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector
DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector
// ... Resizing and initialization
c = a + trans( b );
\endcode
// \n Previous: \ref getting_started Next: \ref vector_types
*/
//*************************************************************************************************
//**Vector Types***********************************************************************************
/*!\page vector_types Vector Types
//
// \tableofcontents
//
//
// \n \section vector_types_static_vector StaticVector
// <hr>
//
// The blaze::StaticVector class template is the representation of a fixed size vector with
// statically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/StaticVector.h>
\endcode
// The type of the elements, the number of elements, and the transpose flag of the vector can
// be specified via the three template parameters:
\code
template< typename Type, size_t N, bool TF >
class StaticVector;
\endcode
// - \c Type: specifies the type of the vector elements. StaticVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the total number of vector elements. It is expected that StaticVector is
// only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at
// compile time:
\code
// Definition of a 3-dimensional integral column vector
blaze::StaticVector<int,3UL> a;
// Definition of a 4-dimensional single precision column vector
blaze::StaticVector<float,4UL,blaze::columnVector> b;
// Definition of a 6-dimensional double precision row vector
blaze::StaticVector<double,6UL,blaze::rowVector> c;
\endcode
// \n \section vector_types_dynamic_vector DynamicVector
// <hr>
//
// The blaze::DynamicVector class template is the representation of an arbitrary sized vector
// with dynamically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/DynamicVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class DynamicVector;
\endcode
// - \c Type: specifies the type of the vector elements. DynamicVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best
// choice for medium to large vectors. Its size can be modified at runtime:
\code
// Definition of a 3-dimensional integral column vector
blaze::DynamicVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector
blaze::DynamicVector<float,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0
blaze::DynamicVector<double,blaze::rowVector> c;
\endcode
// \n \section vector_types_hybrid_vector HybridVector
// <hr>
//
// The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and
// the blaze::DynamicVector class templates. It represents a fixed size vector with statically
// allocated elements, but still can be dynamically resized (within the bounds of the available
// memory). It can be included via the header file
\code
#include <blaze/math/HybridVector.h>
\endcode
// The type of the elements, the number of elements, and the transpose flag of the vector can
// be specified via the three template parameters:
\code
template< typename Type, size_t N, bool TF >
class HybridVector;
\endcode
// - \c Type: specifies the type of the vector elements. HybridVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the maximum number of vector elements. It is expected that HybridVector
// is only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not
// known at compile time or not fixed at runtime, but whose maximum size is known at compile
// time:
\code
// Definition of a 3-dimensional integral column vector with a maximum size of 6
blaze::HybridVector<int,6UL> a( 3UL );
// Definition of a 4-dimensional single precision column vector with a maximum size of 16
blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0 and a maximum size of 6
blaze::HybridVector<double,6UL,blaze::rowVector> c;
\endcode
// \n \section vector_types_custom_vector CustomVector
// <hr>
//
// The blaze::CustomVector class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data
// structure. Thus in contrast to all other dense vector types a custom vector does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of element
// during construction. A custom vector can therefore be considered an alias to the existing
// array. It can be included via the header file
\code
#include <blaze/math/CustomVector.h>
\endcode
// The type of the elements, the properties of the given array of elements and the transpose
// flag of the vector can be specified via the following four template parameters:
\code
template< typename Type, bool AF, bool PF, bool TF >
class CustomVector;
\endcode
// - Type: specifies the type of the vector elements. blaze::CustomVector can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - PF : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::CustomVector is the right choice if any external array needs to be represented as
// a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays
using UnalignedUnpadded = CustomVector<int,unaligned,unpadded,columnVector>;
std::vector<int> vec( 7UL );
UnalignedUnpadded a( &vec[0], 7UL );
// Definition of a managed custom column vector for unaligned but padded 'float' arrays
using UnalignedPadded = CustomVector<float,unaligned,padded,columnVector>;
std::unique_ptr<float[]> memory1( new float[16] );
UnalignedPadded b( memory1.get(), 9UL, 16UL );
// Definition of a managed custom row vector for aligned, unpadded 'double' arrays
using AlignedUnpadded = CustomVector<double,aligned,unpadded,rowVector>;
std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL ) );
AlignedUnpadded c( memory2.get(), 7UL );
// Definition of a managed custom row vector for aligned, padded 'complex<double>' arrays
using cplx = complex<double>;
using AlignedPadded = CustomVector<cplx,aligned,padded,columnVector>;
std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) );
AlignedPadded d( memory3.get(), 5UL, 8UL );
\endcode
// In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several
// special characteristics. All of these result from the fact that a custom vector is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref vector_types_custom_vector_memory_management</b>
// -# <b>\ref vector_types_custom_vector_copy_operations</b>
// -# <b>\ref vector_types_custom_vector_alignment</b>
// -# <b>\ref vector_types_custom_vector_padding</b>
//
// \n \subsection vector_types_custom_vector_memory_management Memory Management
//
// The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// vector data structure. However, this flexibility comes with the price that the user of a custom
// vector is responsible for the resource management.
//
// The following examples give an impression of several possible types of custom vectors:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of a 3-dimensional custom vector with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom vector!
std::vector<int> vec( 3UL );
CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL );
// Definition of a custom vector with size 3 and capacity 16 with aligned, padded and
// externally managed integer array. Note that the std::unique_ptr must be guaranteed
// to outlive the custom vector!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) );
CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL );
\endcode
// \n \subsection vector_types_custom_vector_copy_operations Copy Operations
//
// As with all dense vectors it is possible to copy construct a custom vector:
\code
using blaze::CustomVector;
using blaze::unaligned;
using blaze::unpadded;
using CustomType = CustomVector<int,unaligned,unpadded>;
std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10
CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector
a[1] = 20; // Also modifies the std::vector
CustomType b( a ); // Creating a copy of vector a
b[2] = 20; // Also affects vector a and the std::vector
\endcode
// It is important to note that a custom vector acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom vector that is referencing and representing
// the same array as the original custom vector.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom vector, but modifies the values of the array:
\code
std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4
CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector
a = c; // Copy assignment: Set all values of vector a and b to 4.
\endcode
// \n \subsection vector_types_custom_vector_alignment Alignment
//
// In case the custom vector is specified as \c aligned the passed array must be guaranteed to
// be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For
// instance, if AVX is active an array of integers must be 32-byte aligned:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
// Allocation of 32-byte aligned memory
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) );
CustomVector<int,aligned,unpadded> a( memory.get(), 5UL );
\endcode
// In case the alignment requirements are violated, a \c std::invalid_argument exception is
// thrown.
//
// \n \subsection vector_types_custom_vector_padding Padding
//
// Adding padding elements to the end of an array can have a significant impact on the performance.
// For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors
// of double precision values can be added via a single SIMD addition operation:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using CustomType = CustomVector<double,aligned,padded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) );
// Creating padded custom vectors of size 3 and a capacity of 4
CustomType a( memory1.get(), 3UL, 4UL );
CustomType b( memory2.get(), 3UL, 4UL );
CustomType c( memory3.get(), 3UL, 4UL );
// ... Initialization
c = a + b; // AVX-based vector addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted, a scalar addition has to be used:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
using CustomType = CustomVector<double,aligned,unpadded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) );
// Creating unpadded custom vectors of size 3
CustomType a( memory1.get(), 3UL );
CustomType b( memory2.get(), 3UL );
CustomType c( memory3.get(), 3UL );
// ... Initialization
c = a + b; // Scalar vector addition
\endcode
// Note the different number of constructor parameters for unpadded and padded custom vectors:
// In contrast to unpadded vectors, where during the construction only the size of the array
// has to be specified, during the construction of a padded custom vector it is additionally
// necessary to explicitly specify the capacity of the array.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom vector the added padding elements must
// guarantee that the capacity is greater or equal than the size and a multiple of the SIMD vector
// width. In case of unaligned padded vectors the number of padding elements can be greater or
// equal the number of padding elements of an aligned padded custom vector. In case the padding
// is insufficient with respect to the available instruction set, a \c std::invalid_argument
// exception is thrown.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \section vector_types_compressed_vector CompressedVector
// <hr>
//
// The blaze::CompressedVector class is the representation of an arbitrarily sized sparse
// vector, which stores only non-zero elements of arbitrary type. It can be included via the
// header file
\code
#include <blaze/math/CompressedVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class CompressedVector;
\endcode
// - \c Type: specifies the type of the vector elements. CompressedVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::CompressedVector is the right choice for all kinds of sparse vectors:
\code
// Definition of a 3-dimensional integral column vector
blaze::CompressedVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements
blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL );
// Definition of a double precision row vector with size 0
blaze::CompressedVector<double,blaze::rowVector> c;
\endcode
// \n Previous: \ref vectors Next: \ref vector_operations
*/
//*************************************************************************************************
//**Vector Operations******************************************************************************
/*!\page vector_operations Vector Operations
//
// \tableofcontents
//
//
// \n \section vector_operations_constructors Constructors
// <hr>
//
// Instantiating and setting up a vector is very easy and intuitive. However, there are a few
// rules to take care of:
// - In case the last template parameter (the transpose flag) is omitted, the vector is per
// default a column vector.
// - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection vector_operations_default_construction Default Construction
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
// All vectors can be default constructed. Whereas the size
// of StaticVectors is fixed via the second template parameter,
// the initial size of a default constructed DynamicVector or
// CompressedVector is 0.
StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector.
// All elements are initialized to 0.
StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector.
// Again, all elements are initialized to 0L.
DynamicVector<float> v3; // Instantiation of a dynamic single precision column
// vector of size 0.
DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row
// vector of size 0.
CompressedVector<int> v5; // Instantiation of a compressed integer column
// vector of size 0.
CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row
// vector of size 0.
\endcode
// \n \subsection vector_operations_size_construction Construction with Specific Size
//
// The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that
// allows to immediately give the vector the required size. Whereas both dense vectors (i.e.
// \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector
// elements, \c CompressedVector merely acquires the size but remains empty.
\code
DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector
// of size 9. The elements are NOT initialized!
HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single
// precision complex values. The elements are
// default constructed.
CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with
// size 10. Initially, the vector provides no
// capacity for non-zero elements.
\endcode
// \n \subsection vector_operations_initialization_constructors Initialization Constructors
//
// All dense vector classes offer a constructor that allows for a direct, homogeneous initialization
// of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements
// can be specified
\code
StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector.
// All elements are initialized to 2.
DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision
// column vector of size 3. All elements are
// set to 7.0F.
CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column
// vector of size 15, which provides enough
// space for at least 3 non-zero elements.
\endcode
// \n \subsection vector_operations_array_construction Array Construction
//
// Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic
// or static array. If the vector is initialized from a dynamic array, the constructor expects the
// actual size of the array as first argument, the array as second argument. In case of a static
// array, the fixed size of the array is used:
\code
const unique_ptr<double[]> array1( new double[2] );
// ... Initialization of the dynamic array
blaze::StaticVector<double,2UL> v13( 2UL, array1.get() );
int array2[4] = { 4, -5, -6, 7 };
blaze::StaticVector<int,4UL> v14( array2 );
\endcode
// \n \subsection vector_operations_initializer_list_construction Initializer List Construction
//
// In addition, all dense and sparse vector classes can be directly initialized by means of an
// initializer list:
\code
blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F };
blaze::CompressedVector<int> v16{ 0, 2, 0, 0, 5, 0, 7, 0 };
\endcode
// In case of sparse vectors, only the non-zero elements are used to initialize the vector.
//
// \n \subsection vector_operations_copy_construction Copy Construction
//
// All dense and sparse vectors can be created as the copy of any other dense or sparse vector
// with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector).
\code
StaticVector<int,9UL,columnVector> v17( v7 ); // Instantiation of the dense column vector v17
// as copy of the dense column vector v7.
DynamicVector<int,rowVector> v18( v9 ); // Instantiation of the dense row vector v18 as
// copy of the sparse row vector v9.
CompressedVector<int,columnVector> v19( v1 ); // Instantiation of the sparse column vector v19
// as copy of the dense column vector v1.
CompressedVector<float,rowVector> v20( v12 ); // Instantiation of the sparse row vector v20 as
// copy of the row vector v12.
\endcode
// Note that it is not possible to create a \c StaticVector as a copy of a vector with a different
// size:
\code
StaticVector<int,5UL,columnVector> v21( v7 ); // Runtime error: Size does not match!
StaticVector<int,4UL,rowVector> v22( v10 ); // Compile time error: Size does not match!
\endcode
// \n \section vector_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse vectors:
// \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment,
// \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment.
//
// \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment
//
// Sometimes it may be necessary to assign the same value to all elements of a dense vector.
// For this purpose, the assignment operator can be used:
\code
blaze::StaticVector<int,3UL> v1;
blaze::DynamicVector<double> v2;
// Setting all integer elements of the StaticVector to 2
v1 = 2;
// Setting all double precision elements of the DynamicVector to 5.0
v2 = 5.0;
\endcode
// \n \subsection vector_operations_array_assignment Array Assignment
//
// Dense vectors can also be assigned a static array:
\code
blaze::StaticVector<float,2UL> v1;
blaze::DynamicVector<double,rowVector> v2;
float array1[2] = { 1.0F, 2.0F };
double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 };
v1 = array1;
v2 = array2;
\endcode
// \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment
//
// Alternatively, it is possible to directly assign an initializer list to a dense or sparse
// vector:
\code
blaze::DynamicVector<float> v1;
blaze::CompressedVector<double,rowVector> v2;
v1 = { 1.0F, 2.0F };
v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 };
\endcode
// In case of sparse vectors, only the non-zero elements are considered.
//
// \n \subsection vector_operations_copy_assignment Copy Assignment
//
// For all vector types it is generally possible to assign another vector with the same transpose
// flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the
// assigned vector is required to have the same size as the \c StaticVector since the size of a
// \c StaticVector cannot be adapted!
\code
blaze::StaticVector<int,3UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 3UL );
blaze::DynamicVector<float,columnVector> v3( 5UL );
blaze::CompressedVector<int,columnVector> v4( 3UL );
blaze::CompressedVector<float,rowVector> v5( 3UL );
// ... Initialization of the vectors
v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector
v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector
v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector
v1 = v5; // Compilation error: Cannot assign a row vector to a column vector
\endcode
// \n \subsection vector_operations_compound_assignment Compound Assignment
//
// Next to plain assignment, it is also possible to use addition assignment, subtraction
// assignment, and multiplication assignment. Note however, that in contrast to plain assignment
// the size and the transpose flag of the vectors has to be equal in order to be able to perform a
// compound assignment.
\code
blaze::StaticVector<int,5UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 5UL );
blaze::CompressedVector<float,columnVector> v3( 7UL );
blaze::DynamicVector<float,rowVector> v4( 7UL );
blaze::CompressedVector<float,rowVector> v5( 7UL );
// ... Initialization of the vectors
v1 += v2; // OK: Addition assignment between two column vectors of the same size
v1 += v3; // Runtime error: No compound assignment between vectors of different size
v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag
v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size
\endcode
// \n \section vector_operations_element_access Element Access
// <hr>
//
// \n \subsection vector_operations_subscript_operator_1 Subscript Operator
//
// The easiest and most intuitive way to access a dense or sparse vector is via the subscript
// operator. The indices to access a vector are zero-based:
\code
blaze::DynamicVector<int> v1( 5UL );
v1[0] = 1;
v1[1] = 3;
// ...
blaze::CompressedVector<float> v2( 5UL );
v2[2] = 7.3F;
v2[4] = -1.4F;
\endcode
// Whereas using the subscript operator on a dense vector only accesses the already existing
// element, accessing an element of a sparse vector via the subscript operator potentially
// inserts the element into the vector and may therefore be more expensive. Consider the
// following example:
\code
blaze::CompressedVector<int> v1( 10UL );
for( size_t i=0UL; i<v1.size(); ++i ) {
... = v1[i];
}
\endcode
// Although the compressed vector is only used for read access within the for loop, using the
// subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore the
// preferred way to traverse the non-zero elements of a sparse vector is to use iterators.
//
// \n \subsection vector_operations_iterators Iterators
//
// All vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(),
// \c end(), and \c cend() functions to traverse the currently contained elements by iterators.
// In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a
// manipulation of the non-zero value, in case of a constant vector or in case \c cbegin() or
// \c cend() are used a \c ConstIterator is returned:
\code
using blaze::CompressedVector;
CompressedVector<int> v1( 10UL );
// ... Initialization of the vector
// Traversing the vector by Iterator
for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
// Traversing the vector by ConstIterator
for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) {
// ...
}
for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) {
// ...
}
\endcode
// \n \section vector_operations_element_insertion Element Insertion
// <hr>
//
// In contrast to dense vectors, that store all elements independent of their value and that
// offer direct access to all elements, sparse vectors only store the non-zero elements contained
// in the vector. Therefore it is necessary to explicitly add elements to the vector.
//
// \n \subsection vector_operations_subscript_operator_2 Subscript Operator
//
// The first option to add elements to a sparse vector is the subscript operator:
\code
using blaze::CompressedVector;
CompressedVector<int> v1( 3UL );
v1[1] = 2;
\endcode
// In case the element at the given index is not yet contained in the vector, it is automatically
// inserted. Otherwise the old value is replaced by the new value 2. The operator returns a
// reference to the sparse vector element.
//
// \n \subsection vector_operations_set .set()
//
// An alternative to the subscript operator is the \c set() function: In case the element is not
// yet contained in the vector the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at index 3
v1.set( 3, 1 );
\endcode
// \n \subsection vector_operations_insert .insert()
//
// The insertion of elements can be better controlled via the \c insert() function. In contrast to
// the subscript operator and the \c set() function it emits an exception in case the element is
// already contained in the vector. In order to check for this case, the \c find() function can be
// used:
\code
// In case the element at index 4 is not yet contained in the vector it is inserted
// with a value of 6.
if( v1.find( 4 ) == v1.end() )
v1.insert( 4, 6 );
\endcode
// \n \subsection vector_operations_append .append()
//
// Although the \c insert() function is very flexible, due to performance reasons it is not suited
// for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill
// a sparse vector is the \c append() function. It requires the sparse vector to provide enough
// capacity to insert a new element. Additionally, the index of the new element must be larger
// than the index of the previous element. Violating these conditions results in undefined
// behavior!
\code
v1.reserve( 10 ); // Reserving space for 10 non-zero elements
v1.append( 5, -2 ); // Appending the element -2 at index 5
v1.append( 6, 4 ); // Appending the element 4 at index 6
// ...
\endcode
// \n \section vector_operations_element_removal Element Removal
// <hr>
//
// \subsection vector_operations_erase .erase()
//
// The \c erase() member functions can be used to remove elements from a sparse vector. The
// following example gives an impression of the five different flavors of \c erase():
\code
using blaze::CompressedVector;
CompressedVector<int> v( 42 );
// ... Initialization of the vector
// Erasing the element at index 21
v.erase( 21 );
// Erasing a single element via iterator
v.erase( v.find( 4 ) );
// Erasing all non-zero elements in the range [7..24]
v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) );
// Erasing all non-zero elements with a value larger than 9 by passing a unary predicate
v.erase( []( int i ){ return i > 9; } );
// Erasing all non-zero elements in the range [30..40] with a value larger than 5
v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } );
\endcode
// \n \section vector_operations_element_lookup Element Lookup
// <hr>
//
// A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever
// accessing a vector element at a specific index a lookup operation is required. Whereas the
// subscript operator is performing this lookup automatically, it is also possible to use the
// \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup.
//
// \n \subsection vector_operations_find .find()
//
// The \c find() function can be used to check whether a specific element is contained in a sparse
// vector. It specifically searches for the element at the given index. In case the element is
// found, the function returns an iterator to the element. Otherwise an iterator just past the
// last non-zero element of the compressed vector (the \c end() iterator) is returned. Note that
// the returned iterator is subject to invalidation due to inserting operations via the subscript
// operator, the \c set() function or the \c insert() function!
\code
using blaze::CompressedVector;
CompressedVector<int> a( 42 );
// ... Initialization of the vector
// Searching the element at index 7. In case the element is not
// contained in the vector, the end() iterator is returned.
CompressedVector<int>::Iterator pos( a.find( 7 ) );
if( pos != a.end() ) {
// ...
}
\endcode
// \n \subsection vector_operations_lowerbound .lowerBound()
//
// The \c lowerBound() function returns an iterator to the first element with an index not less
// than the given index. In combination with the \c upperBound() function this function can be
// used to create a pair of iterators specifying a range of indices. Note that the returned
// iterator is subject to invalidation due to inserting operations via the subscript operator,
// the \c set() function or the \c insert() function!
\code
using blaze::CompressedVector;
CompressedVector<int> a( 42 );
// ... Initialization of the vector
// Searching the lower bound of index 17.
CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );
// Searching the upper bound of index 28
CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );
// Erasing all elements in the specified range
a.erase( pos1, pos2 );
\endcode
// \n \subsection vector_operations_upperbound .upperBound()
//
// The \c upperBound() function returns an iterator to the first element with an index greater than
// the given index. In combination with the \c lowerBound() function this function can be used to
// create a pair of iterators specifying a range of indices. Note that the returned iterator is
// subject to invalidation due to inserting operations via the subscript operator, the \c set()
// function or the \c insert() function!
\code
using blaze::CompressedVector;
CompressedVector<int> a( 42 );
// ... Initialization of the vector
// Searching the lower bound of index 17.
CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );
// Searching the upper bound of index 28
CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );
// Erasing all elements in the specified range
a.erase( pos1, pos2 );
\endcode
// \n \section vector_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection vector_operations_size .size()
//
// Via the \c size() member function, the current size of a dense or sparse vector can be queried:
\code
// Instantiating a dynamic vector with size 10
blaze::DynamicVector<int> v1( 10UL );
v1.size(); // Returns 10
// Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements
blaze::CompressedVector<double> v2( 12UL, 3UL );
v2.size(); // Returns 12
\endcode
// Alternatively, the free function \c size() can be used to query to current size of a vector.
// In contrast to the member function, the free function can also be used to query the size of
// vector expressions:
\code
size( v1 ); // Returns 10, i.e. has the same effect as the member function
size( v2 ); // Returns 12, i.e. has the same effect as the member function
blaze::DynamicMatrix<int> A( 15UL, 12UL );
size( A * v2 ); // Returns 15, i.e. the size of the resulting vector
\endcode
// \n \subsection vector_operations_capacity .capacity()
//
// Via the \c capacity() (member) function the internal capacity of a dense or sparse vector
// can be queried. Note that the capacity of a vector doesn't have to be equal to the size
// of a vector. In case of a dense vector the capacity will always be greater or equal than
// the size of the vector, in case of a sparse vector the capacity may even be less than
// the size.
\code
v1.capacity(); // Returns at least 10
\endcode
// For symmetry reasons, there is also a free function \c capacity() available that can be used
// to query the capacity:
\code
capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function
\endcode
// Note, however, that it is not possible to query the capacity of a vector expression:
\code
capacity( A * v1 ); // Compilation error!
\endcode
// \n \subsection vector_operations_nonzeros .nonZeros()
//
// For both dense and sparse vectors the number of non-zero elements can be determined via the
// \c nonZeros() member function. Sparse vectors directly return their number of non-zero
// elements, dense vectors traverse their elements and count the number of non-zero elements.
\code
v1.nonZeros(); // Returns the number of non-zero elements in the dense vector
v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector
\endcode
// There is also a free function \c nonZeros() available to query the current number of non-zero
// elements:
\code
nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector
nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in
// a vector expression. However, the result is not the exact number of non-zero elements, but
// may be a rough estimation:
\code
nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression
\endcode
// \n \subsection vector_operations_isnan isnan()
//
// The \c isnan() function provides the means to check a dense or sparse vector for not-a-number
// elements:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
\code
blaze::CompressedVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
// If at least one element of the vector is not-a-number, the function returns \c true, otherwise
// it returns \c false. Please note that this function only works for vectors with floating point
// elements. The attempt to use it for a vector with a non-floating point element type results in
// a compile time error.
//
//
// \n \subsection vector_operations_isdefault isDefault()
//
// The \c isDefault() function returns whether the given dense or sparse vector is in default state:
\code
blaze::HybridVector<int,20UL> a;
// ... Resizing and initialization
if( isDefault( a ) ) { ... }
\endcode
// A vector is in default state if it appears to just have been default constructed. All resizable
// vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are
// in default state if its size is equal to zero. A non-resizable vector (\c StaticVector, all
// subvectors, element selections, rows, and columns) is in default state if all its elements are
// in default state. For instance, in case the vector is instantiated for a built-in integral or
// floating point data type, the function returns \c true in case all vector elements are 0 and
// \c false in case any vector element is not 0.
//
//
// \n \subsection vector_operations_isUniform isUniform()
//
// In order to check if all vector elements are identical, the \c isUniform function can be used:
\code
blaze::DynamicVector<int> a;
// ... Resizing and initialization
if( isUniform( a ) ) { ... }
\endcode
// Note that in case of sparse vectors the zero elements are also taken into account!
//
//
// \n \subsection vector_operations_length length() / sqrLength()
//
// In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length()
// and \c sqrLength() function can be used:
\code
blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F };
const float len = length ( v ); // Computes the current length of the vector
const float sqrlen = sqrLength( v ); // Computes the square length of the vector
\endcode
// Note that both functions can only be used for vectors with built-in or complex element type!
//
//
// \n \subsection vector_operations_vector_trans trans()
//
// As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors
// (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However,
// vectors can be transposed via the \c trans() function:
\code
blaze::DynamicVector<int,columnVector> v1( 4UL );
blaze::CompressedVector<int,rowVector> v2( 4UL );
v1 = v2; // Compilation error: Cannot assign a row vector to a column vector
v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it
// to the column vector v1
v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2
v1 += trans( v2 ); // OK: Addition assignment of two column vectors
\endcode
// \n \subsection vector_operations_ctrans ctrans()
//
// It is also possible to compute the conjugate transpose of a vector. This operation is available
// via the \c ctrans() function:
\code
blaze::CompressedVector< complex<float>, rowVector > v1( 4UL );
blaze::DynamicVector< complex<float>, columnVector > v2( 4UL );
v1 = ctrans( v2 ); // Compute the conjugate transpose vector
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector
v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector
\endcode
// \n \subsection vector_operations_evaluate eval() / evaluate()
//
// The \c evaluate() function forces an evaluation of the given vector expression and enables
// an automatic deduction of the correct result type of an operation. The following code example
// demonstrates its intended use for the multiplication of a dense and a sparse vector:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
auto c = evaluate( a * b );
\endcode
// In this scenario, the \c evaluate() function assists in deducing the exact result type of
// the operation via the \c auto keyword. Please note that if \c evaluate() is used in this
// way, no temporary vector is created and no copy operation is performed. Instead, the result
// is directly written to the target vector due to the return value optimization (RVO). However,
// if \c evaluate() is used in combination with an explicit target type, a temporary will be
// created and a copy operation will be performed if the used type differs from the type
// returned from the function:
\code
CompressedVector<double> d( a * b ); // No temporary & no copy operation
DynamicVector<double> e( a * b ); // Temporary & copy operation
d = evaluate( a * b ); // Temporary & copy operation
\endcode
// Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger
// expression. However, please note that \c evaluate() is not intended to be used for this
// purpose. This task is more elegantly and efficiently handled by the \c eval() function:
\code
blaze::DynamicVector<double> a, b, c, d;
d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector
d = a + eval( b * c ); // No creation of a temporary vector
\endcode
// In contrast to the \c evaluate() function, \c eval() can take the complete expression
// into account and therefore can guarantee the most efficient way to evaluate it (see also
// \ref intra_statement_optimization).
//
//
// \n \section vector_operations_modifying_operations Modifying Operations
// <hr>
//
// \subsection vector_operations_resize_reserve .resize() / .reserve()
//
// The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector
// cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as
// \c CompressedVectors can be changed via the \c resize() function:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
DynamicVector<int,columnVector> v1;
CompressedVector<int,rowVector> v2( 4 );
v2[1] = -2;
v2[3] = 11;
// Adapting the size of the dynamic and compressed vectors. The (optional) second parameter
// specifies whether the existing elements should be preserved. Per default, the existing
// elements are preserved.
v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain
// uninitialized, elements of class type are default constructed.
v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the
// new elements are NOT initialized!
v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved.
v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost.
\endcode
// Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors)
// on the vector:
\code
blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic vector of size 10
auto sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6]
v1.resize( 6UL ); // Resizing the vector invalidates the view
\endcode
// When the internal capacity of a vector is no longer sufficient, the allocation of a larger
// chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicVector<int> v1;
v1.reserve( 100 );
v1.size(); // Returns 0
v1.capacity(); // Returns at least 100
\endcode
// Note that the size of the vector remains unchanged, but only the internal capacity is set
// according to the specified value!
//
// \n \subsection vector_operations_shrinkToFit .shrinkToFit()
//
// The internal capacity of vectors with dynamic memory is preserved in order to minimize the
// number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead
// to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal
// capacity:
\code
blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers
v1.resize( 10UL ); // Resize to 10, but the capacity is preserved
v1.shrinkToFit(); // Remove the unused capacity
\endcode
// Please note that due to padding the capacity might not be reduced exactly to \c size(). Please
// also note that in case a reallocation occurs, all iterators (including \c end() iterators), all
// pointers and references to elements of the vector are invalidated.
//
// \subsection vector_operations_reset_clear reset() / clear()
//
// In order to reset all elements of a vector, the \c reset() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with 2.0F.
blaze::DynamicVector<float> v1( 3UL, 2.0F );
// Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged.
reset( v1 ); // Resetting all elements
v1.size(); // Returns 3: size and capacity remain unchanged
\endcode
// In order to return a vector to its default state (i.e. the state of a default constructed
// vector), the \c clear() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with -1.0F.
blaze::DynamicVector<float> v1( 5, -1.0F );
// Resetting the entire vector.
clear( v1 ); // Resetting the entire vector
v1.size(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// Note that resetting or clearing both dense and sparse vectors does not change the capacity
// of the vectors.
//
//
// \n \subsection vector_operations_swap swap()
//
// Via the \c swap() function it is possible to completely swap the contents of two vectors of
// the same type:
\code
blaze::DynamicVector<int,columnVector> v1( 10UL );
blaze::DynamicVector<int,columnVector> v2( 20UL );
swap( v1, v2 ); // Swapping the contents of v1 and v2
\endcode
// \n \section vector_operations_arithmetic_operations Arithmetic Operations
// <hr>
//
// \subsection vector_operations_normalize normalize()
//
// The \c normalize() function can be used to scale any non-zero vector to a length of 1. In
// case the vector does not contain a single non-zero element (i.e. is a zero vector), the
// \c normalize() function returns a zero vector.
\code
blaze::DynamicVector<float,columnVector> v1( 10UL );
blaze::CompressedVector<double,columnVector> v2( 12UL );
v1 = normalize( v1 ); // Normalizing the dense vector v1
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
v1 = normalize( v2 ); // Assigning v1 the normalized vector v2
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
\endcode
// Note that the \c normalize() function only works for floating point vectors. The attempt to
// use it for an integral vector results in a compile time error.
//
//
// \n \subsection vector_operations_min_max min() / max()
//
// The \c min() and \c max() functions can be used for a single vector or multiple vectors. If
// passed a single vector, the functions return the smallest and largest element of the given
// dense or sparse vector, respectively:
\code
blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 };
min( a ); // Returns -5
max( a ); // Returns 7
\endcode
// In case the vector currently has a size of 0, both functions return 0. Additionally, in case
// a given sparse vector is not completely filled, the zero elements are taken into account. For
// example, the following compressed vector has only two non-zero elements. However, the minimum
// of this vector is 0:
\code
blaze::CompressedVector<int> b( 4UL, 2UL );
b[0] = 1;
b[2] = 3;
min( b ); // Returns 0
\endcode
// If passed two or more dense vectors, the \c min() and \c max() functions compute the
// componentwise minimum or maximum of the given vectors, respectively:
\code
blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 };
blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 };
min( a, c ); // Results in the vector ( -5, 1, -7, -4 )
max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 )
\endcode
// Please note that sparse vectors can only be used in the unary \c min() and \c max() functions.
// Also note that all forms of the \c min() and \c max() functions can be used to compute the
// smallest and largest element of a vector expression:
\code
min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector
max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector
min( a + c, c - d ); // Results in ( -10 -2 -7 0 )
max( a - c, c + d ); // Results in ( 0 4 14 6 )
\endcode
// \n \subsection vector_operators_abs abs()
//
// The \c abs() function can be used to compute the absolute values of each element of a vector.
// For instance, the following computation
\code
blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 };
blaze::StaticVector<int,3UL,rowVector> b( abs( a ) );
\endcode
// results in the vector
\f$ b = \left(\begin{array}{*{1}{c}}
1 \\
2 \\
3 \\
\end{array}\right)\f$
// \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round()
//
// The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up
// each element of a vector, respectively:
\code
blaze::StaticVector<double,3UL,rowVector> a, b;
b = floor( a ); // Rounding down each element of the vector
b = ceil ( a ); // Rounding up each element of the vector
b = trunc( a ); // Truncating each element of the vector
b = round( a ); // Rounding each element of the vector
\endcode
// \n \subsection vector_operators_conj conj()
//
// The \c conj() function can be applied on a dense or sparse vector to compute the complex
// conjugate of each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Computing the vector of complex conjugates
// ( (-2, 1) )
// ( ( 1,-1) )
StaticVector<cplx,2UL> b;
b = conj( a );
\endcode
// Additionally, vectors can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicVector<cplx> c( 5UL );
conjugate( c ); // In-place conjugate operation.
c = conj( c ); // Same as above
\endcode
// \n \subsection vector_operators_real real()
//
// The \c real() function can be used on a dense or sparse vector to extract the real part of
// each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Extracting the real part of each vector element
// ( -2 )
// ( 1 )
StaticVector<double,2UL> b;
b = real( a );
\endcode
// \n \subsection vector_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse vector to extract the imaginary part
// of each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Extracting the imaginary part of each vector element
// ( -1 )
// ( 1 )
StaticVector<double,2UL> b;
b = imag( a );
\endcode
// \n \subsection vector_operations_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a
// vector can be computed:
\code
blaze::DynamicVector<double> a, b, c;
b = sqrt( a ); // Computes the square root of each element
c = invsqrt( a ); // Computes the inverse square root of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a vector:
\code
blaze::HybridVector<double,3UL> a, b, c;
b = cbrt( a ); // Computes the cubic root of each element
c = invcbrt( a ); // Computes the inverse cubic root of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_hypot hypot()
//
// The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of
// dense vectors:
\code
blaze::StaticVector<double,3UL> a, b, c;
   c = hypot( a, b );  // Computes the componentwise hypotenuse
\endcode
// \n \subsection vector_operations_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a vector to a specific range:
\code
   blaze::DynamicVector<double> a, b;
b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1]
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a vector.
// If passed a vector and a numeric exponent, the function computes the exponential value of each
// element of the vector using the same exponent. If passed a second vector, the function computes
// the componentwise exponential value:
\code
blaze::StaticVector<double,3UL> a, b, c;
c = pow( a, 1.2 ); // Computes the exponential value of each element
c = pow( a, b ); // Computes the componentwise exponential value
\endcode
// \n \subsection vector_operations_exp exp() / exp2() / exp10()
//
// \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a
// vector, respectively:
\code
blaze::DynamicVector<double> a, b;
b = exp( a ); // Computes the base e exponential of each element
b = exp2( a ); // Computes the base 2 exponential of each element
b = exp10( a ); // Computes the base 10 exponential of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_log log() / log2() / log10()
//
// The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary
// and common logarithm of each element of a vector:
\code
blaze::StaticVector<double,3UL> a, b;
b = log( a ); // Computes the natural logarithm of each element
b = log2( a ); // Computes the binary logarithm of each element
b = log10( a ); // Computes the common logarithm of each element
\endcode
// \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan()
//
// The following trigonometric functions are available for both dense and sparse vectors:
\code
blaze::DynamicVector<double> a, b;
b = sin( a ); // Computes the sine of each element of the vector
b = cos( a ); // Computes the cosine of each element of the vector
b = tan( a ); // Computes the tangent of each element of the vector
b = asin( a ); // Computes the inverse sine of each element of the vector
b = acos( a ); // Computes the inverse cosine of each element of the vector
b = atan( a ); // Computes the inverse tangent of each element of the vector
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh()
//
// The following hyperbolic functions are available for both dense and sparse vectors:
\code
blaze::DynamicVector<double> a, b;
b = sinh( a ); // Computes the hyperbolic sine of each element of the vector
b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector
b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector
b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector
b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector
b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_atan2 atan2()
//
// The multi-valued inverse tangent is available for a pair of dense vectors:
\code
blaze::DynamicVector<double> a, b, c;
c = atan2( a, b ); // Computes the componentwise multi-valued inverse tangent
\endcode
// \n \subsection vector_operations_erf erf() / erfc()
//
// The \c erf() and \c erfc() functions compute the (complementary) error function of each
// element of a vector:
\code
blaze::StaticVector<double,3UL,rowVector> a, b;
b = erf( a ); // Computes the error function of each element
b = erfc( a ); // Computes the complementary error function of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_map map() / forEach()
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on vectors. The unary \c map() function can be used to apply a custom operation
// on each element of a dense or sparse vector. For instance, the following example demonstrates
// a custom square root computation via a lambda:
\code
blaze::DynamicVector<double> a, b;
b = map( a, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense vectors. The following example demonstrates the merging of two vectors of double
// precision values into a vector of double precision complex numbers:
\code
blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 };
blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 };
blaze::DynamicVector< complex<double> > cplx;
   // Creating the vector
   //    ( ( 2.1,  0.3) )
   //    ( (-4.2,  1.4) )
   //    ( ( 1.0,  2.9) )
   //    ( ( 0.6, -3.4) )
   cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// Although the computation can be parallelized it is not vectorized and thus cannot perform at
// peak performance. However, it is also possible to create vectorized custom operations. See
// \ref custom_operations for a detailed overview of the possibilities of custom operations.
//
// Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in
// form of the \c forEach() function. With the introduction of binary custom functions, the
// \c forEach() function has been renamed to \c map(). The \c forEach() function can still be
// used (even for binary custom operations), but the function might be deprecated in future
// releases of \b Blaze.
//
//
// \n \section vector_operations_norms Norms
//
// \n \subsection vector_operations_norms_norm norm()
//
// The \c norm() function computes the L2 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l2 = norm( a );
\endcode
// \n \subsection vector_operations_norms_sqrnorm sqrNorm()
//
// The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l2 = sqrNorm( a );
\endcode
// \n \subsection vector_operations_norms_l1norm l1Norm()
//
// The \c l1Norm() function computes the L1 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l1 = l1Norm( a );
\endcode
// \n \subsection vector_operations_norms_l2norm l2Norm()
//
// The \c l2Norm() function computes the L2 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l2 = l2Norm( a );
\endcode
// \n \subsection vector_operations_norms_l3norm l3Norm()
//
// The \c l3Norm() function computes the L3 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double l3 = l3Norm( a );
\endcode
// \n \subsection vector_operations_norms_lpnorm lpNorm()
//
// The \c lpNorm() function computes the general Lp norm of the given dense or sparse vector,
// where the norm is specified by the runtime argument \a p:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double lp = lpNorm( a, 2.3 );
\endcode
// \n \subsection vector_operations_norms_maxnorm maxNorm()
//
// The \c maxNorm() function computes the maximum norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double max = maxNorm( a );
\endcode
// \n Previous: \ref vector_types Next: \ref matrices
*/
//*************************************************************************************************
//**Matrices***************************************************************************************
/*!\page matrices Matrices
//
// \tableofcontents
//
//
// \n \section matrices_general General Concepts
// <hr>
//
// The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix,
// \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix)
// and one sparse matrix type (\ref matrix_types_compressed_matrix). All matrices can either be
// stored as row-major matrices or column-major matrices:
\code
using blaze::DynamicMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Setup of the 2x3 row-major dense matrix
//
// ( 1 2 3 )
// ( 4 5 6 )
//
DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 },
{ 4, 5, 6 } };
// Setup of the 3x2 column-major dense matrix
//
// ( 1 4 )
// ( 2 5 )
// ( 3 6 )
//
DynamicMatrix<int,columnMajor> B{ { 1, 4 },
{ 2, 5 },
{ 3, 6 } };
\endcode
// Per default, all matrices in \b Blaze are row-major matrices:
\code
// Instantiation of a 3x3 row-major matrix
blaze::DynamicMatrix<int> C( 3UL, 3UL );
\endcode
// \n \section matrices_details Matrix Details
// <hr>
//
// - \ref matrix_types
// - \ref matrix_operations
//
//
// \n \section matrices_examples Examples
// <hr>
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix
CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix
DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix
// ... Resizing and initialization
C = A * B;
\endcode
// \n Previous: \ref vector_operations Next: \ref matrix_types
*/
//*************************************************************************************************
//**Matrix Types***********************************************************************************
/*!\page matrix_types Matrix Types
//
// \tableofcontents
//
//
// \n \section matrix_types_static_matrix StaticMatrix
// <hr>
//
// The blaze::StaticMatrix class template is the representation of a fixed size matrix with
// statically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/StaticMatrix.h>
\endcode
// The type of the elements, the number of rows and columns, and the storage order of the matrix
// can be specified via the four template parameters:
\code
template< typename Type, size_t M, size_t N, bool SO >
class StaticMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c M : specifies the total number of rows of the matrix.
// - \c N : specifies the total number of columns of the matrix. Note that it is expected
// that StaticMatrix is only used for tiny and small matrices.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are
// known at compile time:
\code
// Definition of a 3x4 integral row-major matrix
blaze::StaticMatrix<int,3UL,4UL> A;
// Definition of a 4x6 single precision row-major matrix
blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B;
// Definition of a 6x4 double precision column-major matrix
blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_dynamic_matrix DynamicMatrix
// <hr>
//
// The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix
// with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included
// via the header file
\code
#include <blaze/math/DynamicMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class DynamicMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best
// choice for medium to large matrices. The number of rows and columns can be modified at runtime:
\code
// Definition of a 3x4 integral row-major matrix
blaze::DynamicMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::DynamicMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_hybrid_matrix HybridMatrix
// <hr>
//
// The HybridMatrix class template combines the flexibility of a dynamically sized matrix with
// the efficiency and performance of a fixed size matrix. It is implemented as a crossing between
// the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static
// matrix it uses static stack memory instead of dynamically allocated memory and similar to the
// dynamic matrix it can be resized (within the extent of the static memory). It can be included
// via the header file
\code
#include <blaze/math/HybridMatrix.h>
\endcode
// The type of the elements, the maximum number of rows and columns and the storage order of the
// matrix can be specified via the four template parameters:
\code
template< typename Type, size_t M, size_t N, bool SO >
class HybridMatrix;
\endcode
// - Type: specifies the type of the matrix elements. HybridMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - M : specifies the maximum number of rows of the matrix.
// - N : specifies the maximum number of columns of the matrix. Note that it is expected
// that HybridMatrix is only used for tiny and small matrices.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions
// are not known at compile time or not fixed at runtime, but whose maximum dimensions are known
// at compile time:
\code
// Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8
blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16
blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6
blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_custom_matrix CustomMatrix
// <hr>
//
// The blaze::CustomMatrix class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data
// structure. Thus in contrast to all other dense matrix types a custom matrix does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of element
// during construction. A custom matrix can therefore be considered an alias to the existing
// array. It can be included via the header file
\code
#include <blaze/math/CustomMatrix.h>
\endcode
// The type of the elements, the properties of the given array of elements and the storage order
// of the matrix can be specified via the following four template parameters:
\code
template< typename Type, bool AF, bool PF, bool SO >
class CustomMatrix;
\endcode
// - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not.
//  - PF  : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::CustomMatrix is the right choice if any external array needs to be represented as
// a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays
using UnalignedUnpadded = CustomMatrix<int,unaligned,unpadded,rowMajor>;
   std::vector<int> vec( 12UL );
UnalignedUnpadded A( &vec[0], 3UL, 4UL );
// Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays
using UnalignedPadded = CustomMatrix<float,unaligned,padded,columnMajor>;
std::unique_ptr<float[]> memory1( new float[40] );
UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL );
// Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays
using AlignedUnpadded = CustomMatrix<double,aligned,unpadded,rowMajor>;
std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 192UL ) );
AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL );
// Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays
using cplx = complex<double>;
using AlignedPadded = CustomMatrix<cplx,aligned,padded,columnMajor>;
std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) );
AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL );
\endcode
// In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several
// special characteristics. All of these result from the fact that a custom matrix is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref matrix_types_custom_matrix_memory_management</b>
// -# <b>\ref matrix_types_custom_matrix_copy_operations</b>
// -# <b>\ref matrix_types_custom_matrix_alignment</b>
// -# <b>\ref matrix_types_custom_matrix_padding</b>
//
// \n \subsection matrix_types_custom_matrix_memory_management Memory Management
//
// The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// matrix data structure. However, this flexibility comes with the price that the user of a custom
// matrix is responsible for the resource management.
//
// The following examples give an impression of several possible types of custom matrices:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom matrix!
std::vector<int> vec( 12UL );
CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL );
// Definition of a custom 8x12 matrix for an aligned and padded integer array of
// capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr
// must be guaranteed to outlive the custom matrix!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) );
CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL );
\endcode
// \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations
//
// As with all dense matrices it is possible to copy construct a custom matrix:
\code
using blaze::CustomMatrix;
using blaze::unaligned;
using blaze::unpadded;
using CustomType = CustomMatrix<int,unaligned,unpadded>;
std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10
CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A(0,1) = 20;                       // Also modifies the std::vector
CustomType B( A );                 // Creating a copy of matrix A
B(0,2) = 20;                       // Also affects matrix A and the std::vector
\endcode
// It is important to note that a custom matrix acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom matrix that is referencing and representing
// the same array as the original custom matrix.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom matrices, but modifies the values of the array:
\code
std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4
CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A = C; // Copy assignment: Set all values of matrix A and B to 4.
\endcode
// \n \subsection matrix_types_custom_matrix_alignment Alignment
//
// In case the custom matrix is specified as \c aligned the passed array must adhere to some
// alignment restrictions based on the alignment requirements of the used data type and the
// used instruction set (SSE, AVX, ...). The restriction applies to the first element of each
// row/column: In case of a row-major matrix the first element of each row must be properly
// aligned, in case of a column-major matrix the first element of each column must be properly
// aligned. For instance, if a row-major matrix is used and AVX is active the first element of
// each row must be 32-byte aligned:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using blaze::rowMajor;
// Allocation of 32-byte aligned memory
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) );
CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL );
\endcode
// In the example, the row-major matrix has six columns. However, since with AVX eight integer
// values are loaded together the matrix is padded with two additional elements. This guarantees
// that the first element of each row is 32-byte aligned. In case the alignment requirements are
// violated, a \c std::invalid_argument exception is thrown.
//
// \n \subsection matrix_types_custom_matrix_padding Padding
//
// Adding padding elements to the end of each row/column can have a significant impact on the
// performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double
// precision matrices can be added via three SIMD addition operations:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using CustomType = CustomMatrix<double,aligned,padded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) );
// Creating padded custom 3x3 matrix with an additional padding element in each row
CustomType A( memory1.get(), 3UL, 3UL, 4UL );
CustomType B( memory2.get(), 3UL, 3UL, 4UL );
CustomType C( memory3.get(), 3UL, 3UL, 4UL );
// ... Initialization
C = A + B; // AVX-based matrix addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted a scalar addition has to be used:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
using CustomType = CustomMatrix<double,aligned,unpadded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) );
// Creating unpadded custom 3x3 matrix
CustomType A( memory1.get(), 3UL, 3UL );
CustomType B( memory2.get(), 3UL, 3UL );
CustomType C( memory3.get(), 3UL, 3UL );
// ... Initialization
C = A + B; // Scalar matrix addition
\endcode
// Note that the construction of padded and unpadded aligned matrices looks identical. However,
// in case of padded matrices, \b Blaze will zero initialize the padding elements and use them
// in all computations in order to achieve maximum performance. In case of an unpadded matrix
// \b Blaze will ignore the elements with the downside that it is not possible to load a complete
// row to an AVX register, which makes it necessary to fall back to a scalar addition.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom matrix the added padding elements must
// guarantee that the total number of elements in each row/column is a multiple of the SIMD
// vector width. In case of an unaligned padded matrix the number of padding elements can be
// greater or equal the number of padding elements of an aligned padded custom matrix. In case
// the padding is insufficient with respect to the available instruction set, a
// \c std::invalid_argument exception is thrown.
//
//
// \n \section matrix_types_compressed_matrix CompressedMatrix
// <hr>
//
// The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse
// matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be
// included via the header file
\code
#include <blaze/math/CompressedMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class CompressedMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices:
\code
// Definition of a 3x4 integral row-major matrix
blaze::CompressedMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::CompressedMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_identity_matrix IdentityMatrix
// <hr>
//
// The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary
// sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. It can be included
// via the header file
\code
#include <blaze/math/IdentityMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class IdentityMatrix;
\endcode
// - Type: specifies the type of the matrix elements. IdentityMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::IdentityMatrix is the perfect choice to represent an identity matrix:
\code
// Definition of a 3x3 integral row-major identity matrix
blaze::IdentityMatrix<int> A( 3UL );
// Definition of a 6x6 single precision row-major identity matrix
blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL );
// Definition of a double precision column-major identity matrix with 0 rows and columns
blaze::IdentityMatrix<double,blaze::columnMajor> C;
\endcode
// \n Previous: \ref matrices Next: \ref matrix_operations
*/
//*************************************************************************************************
//**Matrix Operations******************************************************************************
/*!\page matrix_operations Matrix Operations
//
// \tableofcontents
//
//
// \n \section matrix_operations_constructors Constructors
// <hr>
//
// Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules
// to be aware of:
// - In case the last template parameter (the storage order) is omitted, the matrix is per
// default stored in row-major order.
// - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection matrix_operations_default_construction Default Construction
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
// All matrices can be default constructed. Whereas the size of
// a StaticMatrix is fixed via the second and third template
// parameter, the initial size of a constructed DynamicMatrix
// or CompressedMatrix is 0.
StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major
// matrix. All elements are initialized to 0.
DynamicMatrix<float> M2; // Instantiation of a single precision dynamic
// row-major matrix with 0 rows and 0 columns.
DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic
// column-major matrix with 0 rows and 0 columns.
CompressedMatrix<int> M4; // Instantiation of a compressed integer
// row-major matrix of size 0x0.
CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision
// column-major matrix of size 0x0.
\endcode
// \n \subsection matrix_operations_size_construction Construction with Specific Size
//
// The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor
// that allows to immediately give the matrices a specific number of rows and columns:
\code
DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major
// matrix. The elements are not initialized.
HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major
// matrix. The elements are not initialized.
CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed
// column-major matrix.
\endcode
// Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately
// allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this
// example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory.
//
//
// \n \subsection matrix_operations_initialization_constructors Initialization Constructors
//
// All dense matrix classes offer a constructor for a direct, homogeneous initialization of all
// matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements
// can be specified.
\code
StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major
// matrix. All elements are initialized to 7.
DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major
// matrix. All elements are initialized to 2.0F.
CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major
// matrix with capacity for 4 non-zero elements.
\endcode
// \n \subsection matrix_operations_array_construction Array Construction
//
// Alternatively, all dense matrix classes offer a constructor for an initialization with a
// dynamic or static array. If the matrix is initialized from a dynamic array, the constructor
// expects the dimensions of values provided by the array as first and second argument, the
// array as third argument. In case of a static array, the fixed size of the array is used:
\code
const std::unique_ptr<double[]> array1( new double[6] );
// ... Initialization of the dynamic array
blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() );
int array2[2][2] = { { 4, -5 }, { -6, 7 } };
blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 );
\endcode
// \n \subsection matrix_operations_initializer_list_construction Initializer List Construction
//
// In addition, all dense and sparse matrix classes can be directly initialized by means of an
// initializer list:
\code
blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F },
{ -0.9F, -1.2F },
{ 4.8F, 0.6F } };
blaze::CompressedMatrix<int,rowMajor> M15{ { 3 },
{ 1 },
{ 0, 2 } };
\endcode
// In case of sparse matrices, only the non-zero elements are used to initialize the matrix.
// Missing values are considered to be default values.
//
// \n \subsection matrix_operations_copy_construction Copy Construction
//
// All dense and sparse matrices can be created as a copy of another dense or sparse matrix.
\code
StaticMatrix<int,5UL,4UL,rowMajor> M16( M6 ); // Instantiation of the dense row-major matrix M16
// as copy of the dense row-major matrix M6.
DynamicMatrix<float,columnMajor> M17( M8 ); // Instantiation of the dense column-major matrix M17
// as copy of the sparse column-major matrix M8.
CompressedMatrix<double,columnMajor> M18( M7 ); // Instantiation of the compressed column-major matrix
// M18 as copy of the dense row-major matrix M7.
CompressedMatrix<float,rowMajor> M19( M8 ); // Instantiation of the compressed row-major matrix
// M19 as copy of the compressed column-major matrix M8.
\endcode
// Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different
// number of rows and/or columns:
\code
StaticMatrix<int,4UL,5UL,rowMajor> M20( M6 ); // Runtime error: Number of rows and columns
// does not match!
StaticMatrix<int,4UL,4UL,columnMajor> M21( M9 ); // Compile time error: Number of columns does
// not match!
\endcode
// \n \section matrix_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse matrices:
// \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment,
// \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment.
//
//
// \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment
//
// It is possible to assign the same value to all elements of a dense matrix. All dense matrix
// classes provide an according assignment operator:
\code
blaze::StaticMatrix<int,3UL,2UL> M1;
blaze::DynamicMatrix<double> M2;
// Setting all integer elements of the StaticMatrix to 4
M1 = 4;
// Setting all double precision elements of the DynamicMatrix to 3.5
M2 = 3.5;
\endcode
// \n \subsection matrix_operations_array_assignment Array Assignment
//
// Dense matrices can also be assigned a static array:
\code
blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1;
blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2;
blaze::DynamicMatrix<double> M3;
int array1[2][2] = { { 1, 2 }, { 3, 4 } };
double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } };
M1 = array1;
M2 = array1;
M3 = array2;
\endcode
// Note that the dimensions of the static array have to match the size of a \c StaticMatrix,
// whereas a \c DynamicMatrix is resized according to the array dimensions:
\f$ M3 = \left(\begin{array}{*{2}{c}}
3.1 & 6.4 \\
-0.9 & -1.2 \\
4.8 & 0.6 \\
\end{array}\right)\f$
// \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment
//
// Alternatively, it is possible to directly assign an initializer list to a dense or sparse
// matrix:
\code
blaze::DynamicMatrix<double> M1;
blaze::CompressedMatrix<int> M2;
M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } };
M2 = { { 1, 0 }, {}, { 0, 1 }, { 2 } };
\endcode
// In case of sparse matrices, only the non-zero elements are considered. Missing values are
// considered to be default values.
//
// \n \subsection matrix_operations_copy_assignment Copy Assignment
//
// All kinds of matrices can be assigned to each other. The only restriction is that since a
// \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of
// rows and in the number of columns.
\code
blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL );
blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL );
blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL );
blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL );
// ... Initialization of the matrices
M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix
M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix
M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix
M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix
\endcode
// \n \subsection matrix_operations_compound_assignment Compound Assignment
//
// Compound assignment is also available for matrices: addition assignment, subtraction assignment,
// and multiplication assignment. In contrast to plain assignment, however, the number of rows
// and columns of the two operands have to match according to the arithmetic operation.
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL );
blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL );
blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL );
blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5;
blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL );
// ... Initialization of the matrices
M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions
M1 -= M3;  // OK: Subtraction assignment between a row-major and a column-major matrix
M1 += M4; // Runtime error: No compound assignment between matrices of different size
M1 -= M5; // Compilation error: No compound assignment between matrices of different size
M2 *= M6; // OK: Multiplication assignment between two row-major matrices
\endcode
// Note that the multiplication assignment potentially changes the number of columns of the
// target matrix:
\f$\left(\begin{array}{*{3}{c}}
2 & 0 & 1 \\
0 & 3 & 2 \\
\end{array}\right) \times
\left(\begin{array}{*{2}{c}}
4 & 0 \\
1 & 0 \\
0 & 3 \\
\end{array}\right) =
\left(\begin{array}{*{2}{c}}
8 & 3 \\
3 & 6 \\
\end{array}\right)\f$
// Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a
// multiplication assignment with other square matrices of the same dimensions.
//
//
// \n \section matrix_operations_element_access Element Access
// <hr>
//
// \n \subsection matrix_operations_function_call_operator_1 Function Call Operator
//
// The easiest way to access a specific dense or sparse matrix element is via the function call
// operator. The indices to access a matrix are zero-based:
\code
blaze::DynamicMatrix<int> M1( 4UL, 6UL );
M1(0,0) = 1;
M1(0,1) = 3;
// ...
blaze::CompressedMatrix<double> M2( 5UL, 3UL );
M2(0,2) = 4.1;
M2(1,1) = -6.3;
\endcode
// Since dense matrices allocate enough memory for all contained elements, using the function
// call operator on a dense matrix directly returns a reference to the accessed value. In case
// of a sparse matrix, if the accessed value is currently not contained in the matrix, the
// value is inserted into the matrix prior to returning a reference to the value, which can
// be much more expensive than the direct access to a dense matrix. Consider the following
// example:
\code
blaze::CompressedMatrix<int> M1( 4UL, 4UL );
for( size_t i=0UL; i<M1.rows(); ++i ) {
for( size_t j=0UL; j<M1.columns(); ++j ) {
... = M1(i,j);
}
}
\endcode
// Although the compressed matrix is only used for read access within the for loop, using the
// function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore
// the preferred way to traverse the non-zero elements of a sparse matrix is to use iterators.
//
// \n \subsection matrix_operations_iterators Iterators
//
// All matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(),
// \c end() and \c cend() functions to traverse all contained elements by iterator. Note that
// it is not possible to traverse all elements of the matrix, but that it is only possible to
// traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and
// \c end() return an \c Iterator, which allows a manipulation of the non-zero value, in case of
// a constant matrix or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned:
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 4UL, 6UL );
// Traversing the matrix by Iterator
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
// Traversing the matrix by ConstIterator
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) {
// ...
}
}
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) {
// ...
}
}
\endcode
// \n \section matrix_operations_element_insertion Element Insertion
// <hr>
//
// Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse
// matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements
// to the matrix.
//
// \n \subsection matrix_operations_function_call_operator_2 Function Call Operator
//
// The first possibility to add elements to a sparse matrix is the function call operator:
\code
using blaze::CompressedMatrix;
CompressedMatrix<int> M1( 3UL, 4UL );
M1(1,2) = 9;
\endcode
// In case the element at the given position is not yet contained in the sparse matrix, it is
// automatically inserted. Otherwise the old value is replaced by the new value 9. The operator
// returns a reference to the sparse matrix element.
//
// \n \subsection matrix_operations_set .set()
//
// An alternative to the function call operator is the \c set() function: In case the element is
// not yet contained in the matrix the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at position (2,0)
M1.set( 2, 0, 1 );
\endcode
// \n \subsection matrix_operations_insert .insert()
// The insertion of elements can be better controlled via the \c insert() function. In contrast
// to the function call operator and the \c set() function it emits an exception in case the
// element is already contained in the matrix. In order to check for this case, the \c find()
// function can be used:
\code
// In case the element at position (2,3) is not yet contained in the matrix it is inserted
// with a value of 4.
if( M1.find( 2, 3 ) == M1.end( 2 ) )
M1.insert( 2, 3, 4 );
\endcode
// \n \subsection matrix_operations_append .append()
//
// Although the \c insert() function is very flexible, due to performance reasons it is not
// suited for the setup of large sparse matrices. A very efficient, yet also very low-level
// way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to
// provide enough capacity to insert a new element in the specified row/column. Additionally,
// the index of the new element must be larger than the index of the previous element in the
// same row/column. Violating these conditions results in undefined behavior!
\code
M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0
M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1
M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2
// ...
\endcode
// The most efficient way to fill a sparse matrix with elements, however, is a combination of
// \c reserve(), \c append(), and the \c finalize() function:
\code
// Setup of the compressed row-major matrix
//
// ( 0 1 0 2 0 )
// A = ( 0 0 0 0 0 )
// ( 3 0 0 0 0 )
//
blaze::CompressedMatrix<int> M1( 3UL, 5UL );
M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements
M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1
M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3
M1.finalize( 0 ); // Finalizing row 0
M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2
M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0
M1.finalize( 2 ); // Finalizing row 2
\endcode
// \note The \c finalize() function has to be explicitly called for each row or column, even
// for empty ones!
// \note Although \c append() does not allocate new memory, it still invalidates all iterators
// returned by the \c end() functions!
//
//
// \n \section matrix_operations_element_removal Element Removal
// <hr>
//
// \subsection matrix_operations_erase .erase()
//
// The \c erase() member functions can be used to remove elements from a sparse matrix. The
// following example gives an impression of the five different flavors of \c erase():
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 42, 53 );
// ... Initialization of the matrix
// Erasing the element at position (21,23)
A.erase( 21, 23 );
// Erasing a single element in row 17 via iterator
A.erase( 17, A.find( 17, 4 ) );
// Erasing all non-zero elements in the range [7..24] of row 33
A.erase( 33, A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) );
// Erasing all non-zero elements with a value larger than 9 by passing a unary predicate
A.erase( []( int i ){ return i > 9; } );
// Erasing all non-zero elements in the range [30..40] of row 37 with a value larger than 5
CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 37, 30 ) );
CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 37, 40 ) );
A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } );
\endcode
// \n \section matrix_operations_element_lookup Element Lookup
// <hr>
//
// A sparse matrix only stores the non-zero elements contained in the matrix. Therefore, whenever
// accessing a matrix element at a specific position a lookup operation is required. Whereas the
// function call operator is performing this lookup automatically, it is also possible to use the
// \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup.
//
// \n \subsection matrix_operations_find .find()
//
// The \c find() function can be used to check whether a specific element is contained in the
// sparse matrix. It specifically searches for the element at the specified position. In case
// the element is found, the function returns an iterator to the element. Otherwise an iterator
// just past the last non-zero element of the according row or column (the \c end() iterator)
// is returned. Note that the returned iterator is subject to invalidation due to inserting
// operations via the function call operator, the \c set() function or the \c insert() function!
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 42, 53 );
// ... Initialization of the matrix
// Searching the element at position (7,17). In case the element is not
// contained in the vector, the end() iterator of row 7 is returned.
CompressedMatrix<int,rowMajor>::Iterator pos( A.find( 7, 17 ) );
if( pos != A.end( 7 ) ) {
// ...
}
\endcode
// \n \subsection matrix_operations_lowerbound .lowerBound()
//
// In case of a row-major matrix, this function returns a row iterator to the first element with
// an index not less than the given column index. In case of a column-major matrix, the function
// returns a column iterator to the first element with an index not less than the given row
// index. In combination with the \c upperBound() function this function can be used to create a
// pair of iterators specifying a range of indices. Note that the returned iterator is subject
// to invalidation due to inserting operations via the function call operator, the \c set()
// function or the \c insert() function!
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 42, 53 );
// ... Initialization of the matrix
// Searching the lower bound of column index 17 in row 7.
CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 7, 17 ) );
// Searching the upper bound of column index 28 in row 7
CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 7, 28 ) );
// Erasing all elements in the specified range
A.erase( 7, pos1, pos2 );
\endcode
// \n \subsection matrix_operations_upperbound .upperBound()
//
// In case of a row-major matrix, this function returns a row iterator to the first element with
// an index greater than the given column index. In case of a column-major matrix, the function
// returns a column iterator to the first element with an index greater than the given row
// index. In combination with the \c lowerBound() function this function can be used to create a
// pair of iterators specifying a range of indices. Note that the returned iterator is subject
// to invalidation due to inserting operations via the function call operator, the \c set()
// function or the \c insert() function!
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,columnMajor> A( 42, 53 );
// ... Initialization of the matrix
// Searching the lower bound of row index 17 in column 9.
CompressedMatrix<int,columnMajor>::Iterator pos1( A.lowerBound( 17, 9 ) );
// Searching the upper bound of row index 28 in column 9
CompressedMatrix<int,columnMajor>::Iterator pos2( A.upperBound( 28, 9 ) );
// Erasing all elements in the specified range
A.erase( 9, pos1, pos2 );
\endcode
// \n \section matrix_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection matrix_operations_rows .rows()
//
// The current number of rows of a matrix can be acquired via the \c rows() member function:
\code
// Instantiating a dynamic matrix with 10 rows and 8 columns
blaze::DynamicMatrix<int> M1( 10UL, 8UL );
M1.rows(); // Returns 10
// Instantiating a compressed matrix with 8 rows and 12 columns
blaze::CompressedMatrix<double> M2( 8UL, 12UL );
M2.rows(); // Returns 8
\endcode
// Alternatively, the free function \c rows() can be used to query the current number of rows of
// a matrix. In contrast to the member function, the free function can also be used to query the
// number of rows of a matrix expression:
\code
rows( M1 ); // Returns 10, i.e. has the same effect as the member function
rows( M2 ); // Returns 8, i.e. has the same effect as the member function
rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix
\endcode
// \n \subsection matrix_operations_columns .columns()
//
// The current number of columns of a matrix can be acquired via the \c columns() member function:
\code
// Instantiating a dynamic matrix with 6 rows and 8 columns
blaze::DynamicMatrix<int> M1( 6UL, 8UL );
M1.columns(); // Returns 8
// Instantiating a compressed matrix with 8 rows and 7 columns
blaze::CompressedMatrix<double> M2( 8UL, 7UL );
M2.columns(); // Returns 7
\endcode
// There is also a free function \c columns() available, which can also be used to query the number
// of columns of a matrix expression:
\code
columns( M1 ); // Returns 8, i.e. has the same effect as the member function
columns( M2 ); // Returns 7, i.e. has the same effect as the member function
columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix
\endcode
// \n \subsection matrix_operations_capacity .capacity()
//
// The \c capacity() member function returns the internal capacity of a dense or sparse matrix.
// Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of
// a dense matrix the capacity will always be greater or equal than the total number of elements
// of the matrix. In case of a sparse matrix, the capacity will usually be much less than the
// total number of elements.
\code
blaze::DynamicMatrix<float> M1( 5UL, 7UL );
blaze::StaticMatrix<float,7UL,4UL> M2;
M1.capacity(); // Returns at least 35
M2.capacity(); // Returns at least 28
\endcode
// There is also a free function \c capacity() available to query the capacity. However, please
// note that this function cannot be used to query the capacity of a matrix expression:
\code
capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function
capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function
capacity( M1 * M2 ); // Compilation error!
\endcode
// \n \subsection matrix_operations_nonzeros .nonZeros()
//
// For both dense and sparse matrices the current number of non-zero elements can be queried
// via the \c nonZeros() member function. In case of matrices there are two flavors of the
// \c nonZeros() function: One returns the total number of non-zero elements in the matrix,
// the second returns the number of non-zero elements in a specific row (in case of a row-major
// matrix) or column (in case of a column-major matrix). Sparse matrices directly return their
// number of non-zero elements, dense matrices traverse their elements and count the number of
// non-zero elements.
\code
blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL );
// ... Initializing the dense matrix
M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix
M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2
\endcode
\code
blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL );
// ... Initializing the sparse matrix
M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix
M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in a
// matrix expression. However, the result is not the exact number of non-zero elements, but may be
// a rough estimation:
\code
nonZeros( M1 ); // Has the same effect as the member function
nonZeros( M1, 2 ); // Has the same effect as the member function
nonZeros( M2 ); // Has the same effect as the member function
nonZeros( M2, 3 ); // Has the same effect as the member function
nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression
\endcode
// \n \subsection matrix_operations_isnan isnan()
//
// The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number
// elements:
\code
blaze::DynamicMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
\code
blaze::CompressedMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
// If at least one element of the matrix is not-a-number, the function returns \c true, otherwise
// it returns \c false. Please note that this function only works for matrices with floating point
// elements. The attempt to use it for a matrix with a non-floating point element type results in
// a compile time error.
//
//
// \n \subsection matrix_operations_isdefault isDefault()
//
// The \c isDefault() function returns whether the given dense or sparse matrix is in default state:
\code
blaze::HybridMatrix<int,5UL,4UL> A;
// ... Resizing and initialization
if( isDefault( A ) ) { ... }
\endcode
// A matrix is in default state if it appears to just have been default constructed. All resizable
// matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in
// default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all
// submatrices) is in default state if all its elements are in default state. For instance, in case
// the matrix is instantiated for a built-in integral or floating point data type, the function
// returns \c true in case all matrix elements are 0 and \c false in case any matrix element is
// not 0.
//
//
// \n \subsection matrix_operations_isSquare isSquare()
//
// Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the
// number of columns) can be checked via the \c isSquare() function:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
if( isSquare( A ) ) { ... }
\endcode
// \n \subsection matrix_operations_issymmetric isSymmetric()
//
// Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix
// is symmetric:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isSymmetric( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be symmetric!
//
//
// \n \subsection matrix_operations_isUniform isUniform()
//
// In order to check if all matrix elements are identical, the \c isUniform function can be used:
\code
blaze::DynamicMatrix<int> A;
// ... Resizing and initialization
if( isUniform( A ) ) { ... }
\endcode
// Note that in case of a sparse matrix the zero elements are also taken into account!
//
//
// \n \subsection matrix_operations_islower isLower()
//
// Via the \c isLower() function it is possible to check whether a dense or sparse matrix is
// lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower triangular!
//
//
// \n \subsection matrix_operations_isunilower isUniLower()
//
// Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is
// lower unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlylower isStrictlyLower()
//
// Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix
// is strictly lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly lower triangular!
//
//
// \n \subsection matrix_operations_isUpper isUpper()
//
// Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is
// upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper triangular!
//
//
// \n \subsection matrix_operations_isuniupper isUniUpper()
//
// Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is
// upper unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper()
//
// Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix
// is strictly upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly upper triangular!
//
//
// \n \subsection matrix_operations_isdiagonal isDiagonal()
//
// The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix,
// i.e. if it has only elements on its diagonal and if the non-diagonal elements are default
// elements:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isDiagonal( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be diagonal!
//
//
// \n \subsection matrix_operations_isidentity isIdentity()
//
// The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix,
// i.e. if all diagonal elements are 1 and all non-diagonal elements are 0:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isIdentity( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be identity matrices!
//
//
// \n \subsection matrix_operations_matrix_determinant det()
//
// The determinant of a square dense matrix can be computed by means of the \c det() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
double d = det( A ); // Compute the determinant of A
\endcode
// In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is
// thrown.
//
// \note The \c det() function can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The function is depending on LAPACK kernels. Thus the function can only be used if the
// fitting LAPACK library is available and linked to the executable. Otherwise a linker error
// will be created.
//
//
// \n \subsection matrix_operations_matrix_trans trans()
//
// Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into
// a column-major matrix and vice versa:
\code
blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL );
blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL );
M1 = M2; // Assigning a column-major matrix to a row-major matrix
M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1
M1 += trans( M2 ); // Addition assignment of two row-major matrices
\endcode
// \n \subsection matrix_operations_ctrans ctrans()
//
// The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian
// conjugate, or transjugate) can be computed via the \c ctrans() function:
\code
blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL );
blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL );
M1 = ctrans( M2 ); // Compute the conjugate transpose matrix
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix
M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix
\endcode
// \n \subsection matrix_operations_matrix_evaluate eval() / evaluate()
//
// The \c evaluate() function forces an evaluation of the given matrix expression and enables
// an automatic deduction of the correct result type of an operation. The following code example
// demonstrates its intended use for the multiplication of a lower and a strictly lower dense
// matrix:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::StrictlyLowerMatrix;
LowerMatrix< DynamicMatrix<double> > A;
StrictlyLowerMatrix< DynamicMatrix<double> > B;
// ... Resizing and initialization
auto C = evaluate( A * B );
\endcode
// In this scenario, the \c evaluate() function assists in deducing the exact result type of
// the operation via the \c auto keyword. Please note that if \c evaluate() is used in this
// way, no temporary matrix is created and no copy operation is performed. Instead, the result
// is directly written to the target matrix due to the return value optimization (RVO). However,
// if \c evaluate() is used in combination with an explicit target type, a temporary will be
// created and a copy operation will be performed if the used type differs from the type
// returned from the function:
\code
StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation
LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation
DynamicMatrix<double> F( A * B ); // Temporary & copy operation
D = evaluate( A * B ); // Temporary & copy operation
\endcode
// Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger
// expression. However, please note that \c evaluate() is not intended to be used for this
// purpose. This task is more elegantly and efficiently handled by the \c eval() function:
\code
blaze::DynamicMatrix<double> A, B, C, D;
D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix
D = A + eval( B * C ); // No creation of a temporary matrix
\endcode
// In contrast to the \c evaluate() function, \c eval() can take the complete expression
// into account and therefore can guarantee the most efficient way to evaluate it (see also
// \ref intra_statement_optimization).
//
//
// \n \section matrix_operations_modifying_operations Modifying Operations
// <hr>
//
// \subsection matrix_operations_resize_reserve .resize() / .reserve()
//
// The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template
// parameter and a \c CustomMatrix cannot be resized. In contrast, the number of rows and columns
// of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<int,rowMajor> M1;
CompressedMatrix<int,columnMajor> M2( 3UL, 2UL );
// Adapting the number of rows and columns via the resize() function. The (optional)
// third parameter specifies whether the existing elements should be preserved. Per
// default, the existing elements are preserved.
M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type
// remain uninitialized, elements of class type are default
// constructed.
M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the
// new elements are NOT initialized!
M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved.
M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost.
\endcode
// Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices)
// on the matrix:
\code
blaze::DynamicMatrix<int,rowMajor> M1( 10UL, 20UL ); // Creating a 10x20 matrix
auto row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix
M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view
\endcode
// When the internal capacity of a matrix is no longer sufficient, the allocation of a larger
// chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicMatrix<int> M1;
M1.reserve( 100 );
M1.rows(); // Returns 0
M1.capacity(); // Returns at least 100
\endcode
// Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or
// column (for a column-major matrix):
\code
blaze::CompressedMatrix<int> M1( 4UL, 6UL );
M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1
\endcode
// \n \subsection matrix_operations_shrinkToFit .shrinkToFit()
//
// The internal capacity of matrices with dynamic memory is preserved in order to minimize the
// number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead
// to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal
// capacity:
\code
blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix
M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved
M1.shrinkToFit(); // Remove the unused capacity
\endcode
// Please note that due to padding the capacity might not be reduced exactly to \c rows() times
// \c columns(). Please also note that in case a reallocation occurs, all iterators (including
// \c end() iterators), all pointers and references to elements of this matrix are invalidated.
//
//
// \subsection matrix_operations_reset_clear reset() / clear
//
// In order to reset all elements of a dense or sparse matrix, the \c reset() function can be
// used. The number of rows and columns of the matrix are preserved:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Resetting all elements to 0.0F.
reset( M1 ); // Resetting all elements
M1.rows(); // Returns 4: size and capacity remain unchanged
\endcode
// Alternatively, only a single row or column of the matrix can be reset:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix
blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix
reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix
reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix
\endcode
// In order to reset a row of a column-major matrix or a column of a row-major matrix, use a
// row or column view (see \ref views_rows and \ref views_columns).
//
// In order to return a matrix to its default state (i.e. the state of a default constructed
// matrix), the \c clear() function can be used:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Resetting all elements to 0.0F.
clear( M1 ); // Resetting the entire matrix
M1.rows(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// \n \subsection matrix_operations_matrix_transpose transpose()
//
// In addition to the non-modifying \c trans() function, matrices can be transposed in-place via
// the \c transpose() function:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
transpose( M ); // In-place transpose operation.
M = trans( M ); // Same as above
\endcode
// Note however that the transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_ctranspose ctranspose()
//
// The \c ctranspose() function can be used to perform an in-place conjugate transpose operation:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
ctranspose( M ); // In-place conjugate transpose operation.
M = ctrans( M ); // Same as above
\endcode
// Note however that the conjugate transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_swap swap()
//
// Via the \c swap() function it is possible to completely swap the contents of two matrices
// of the same type:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL );
blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL );
swap( M1, M2 ); // Swapping the contents of M1 and M2
\endcode
// \n \section matrix_operations_arithmetic_operations Arithmetic Operations
// <hr>
//
// \subsection matrix_operations_min_max min() / max()
//
// The \c min() and \c max() functions can be used for a single matrix or multiple matrices. If
// passed a single matrix, the functions return the smallest and largest element of the given
// dense or sparse matrix, respectively:
\code
using blaze::rowMajor;
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -5, 2, 7 },
{ -4, 0, 1 } };
min( A ); // Returns -5
max( A ); // Returns 7
\endcode
// In case the matrix currently has 0 rows or 0 columns, both functions return 0. Additionally, in
// case a given sparse matrix is not completely filled, the zero elements are taken into account.
// For example: the following compressed matrix has only 2 non-zero elements. However, the minimum
// of this matrix is 0:
\code
blaze::CompressedMatrix<int> B{ { 1, 0, 3 },
{ 0, 0, 0 } };
min( B ); // Returns 0
\endcode
// If passed two or more dense matrices, the \c min() and \c max() functions compute the
// componentwise minimum or maximum of the given matrices, respectively:
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 } };
min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 )
max( A, C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 )
\endcode
// Please note that sparse matrices can only be used in the unary \c min() and \c max() functions.
// Also note that all forms of the \c min() and \c max() functions can be used to compute the
// smallest and largest element of a matrix expression:
\code
min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix
max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix
\endcode
// \n \subsection matrix_operators_trace trace()
//
// The \c trace() function sums the diagonal elements of a square dense or sparse matrix:
\code
blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 }
, { -4, -5, 6 }
, { 7, -8, -9 } };
trace( A ); // Returns the sum of the diagonal elements, i.e. -15
\endcode
// In case the given matrix is not a square matrix, a \c std::invalid_argument exception is
// thrown.
//
//
// \n \subsection matrix_operators_abs abs()
//
// The \c abs() function can be used to compute the absolute values of each element of a matrix.
// For instance, the following computation
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 },
{ 4, -5, 6 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) );
\endcode
// results in the matrix
\f$ B = \left(\begin{array}{*{3}{c}}
1 & 2 & 3 \\
4 & 5 & 6 \\
\end{array}\right)\f$
// \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round()
//
// The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down,
// round up, truncate, or round each element of a matrix, respectively:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = floor( A ); // Rounding down each element of the matrix
B = ceil ( A ); // Rounding up each element of the matrix
B = trunc( A ); // Truncating each element of the matrix
B = round( A ); // Rounding each element of the matrix
\endcode
// \n \subsection matrix_operators_conj conj()
//
// The \c conj() function can be applied on a dense or sparse matrix to compute the complex
// conjugate of each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Computing the matrix of conjugate values
// ( (1, 0) (-2, 1) )
// ( (1,-1) ( 0,-1) )
StaticMatrix<cplx,2UL,2UL> B;
B = conj( A );
\endcode
// Additionally, matrices can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicMatrix<cplx> C( 5UL, 2UL );
conjugate( C ); // In-place conjugate operation.
C = conj( C ); // Same as above
\endcode
// \n \subsection matrix_operators_real real()
//
// The \c real() function can be used on a dense or sparse matrix to extract the real part of
// each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Extracting the real part of each matrix element
// ( 1 -2 )
// ( 1 0 )
StaticMatrix<double,2UL,2UL> B;
B = real( A );
\endcode
// \n \subsection matrix_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part
// of each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Extracting the imaginary part of each matrix element
// ( 0 -1 )
// ( 1 1 )
StaticMatrix<double,2UL,2UL> B;
B = imag( A );
\endcode
// \n \subsection matrix_operators_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a
// matrix can be computed:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
B = sqrt( A ); // Computes the square root of each element
C = invsqrt( A ); // Computes the inverse square root of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a matrix:
\code
blaze::DynamicMatrix<double> A, B, C;
B = cbrt( A ); // Computes the cubic root of each element
C = invcbrt( A ); // Computes the inverse cubic root of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operations_hypot hypot()
//
// The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of
// dense matrices:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
C = hypot( A, B );  // Computes the componentwise hypotenuse
\endcode
// \n \subsection matrix_operators_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a matrix to a specific range:
\code
blaze::DynamicMatrix<double> A, B;
B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1]
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a matrix.
// If passed a matrix and a numeric exponent, the function computes the exponential value of each
// element of the matrix using the same exponent. If passed a second matrix, the function computes
// the componentwise exponential value:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
C = pow( A, 1.2 ); // Computes the exponential value of each element
C = pow( A, B ); // Computes the componentwise exponential value
\endcode
// \n \subsection matrix_operators_exp exp()
//
// \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a
// matrix, respectively:
\code
blaze::HybridMatrix<double,3UL,3UL> A, B;
B = exp( A ); // Computes the base e exponential of each element
B = exp2( A ); // Computes the base 2 exponential of each element
B = exp10( A ); // Computes the base 10 exponential of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_log log() / log2() / log10()
//
// The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary
// and common logarithm of each element of a matrix:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = log( A ); // Computes the natural logarithm of each element
B = log2( A ); // Computes the binary logarithm of each element
B = log10( A ); // Computes the common logarithm of each element
\endcode
// \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan()
//
// The following trigonometric functions are available for both dense and sparse matrices:
\code
blaze::DynamicMatrix<double> A, B;
B = sin( A ); // Computes the sine of each element of the matrix
B = cos( A ); // Computes the cosine of each element of the matrix
B = tan( A ); // Computes the tangent of each element of the matrix
B = asin( A ); // Computes the inverse sine of each element of the matrix
B = acos( A ); // Computes the inverse cosine of each element of the matrix
B = atan( A ); // Computes the inverse tangent of each element of the matrix
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh()
//
// The following hyperbolic functions are available for both dense and sparse matrices:
\code
blaze::DynamicMatrix<double> A, B;
B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix
B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix
B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix
B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix
B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix
B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix
\endcode
// \n \subsection matrix_operations_atan2 atan2()
//
// The multi-valued inverse tangent is available for a pair of dense matrices:
\code
blaze::DynamicMatrix<double> A, B, C;
C = atan2( A, B ); // Computes the componentwise multi-valued inverse tangent
\endcode
// \n \subsection matrix_operators_erf erf() / erfc()
//
// The \c erf() and \c erfc() functions compute the (complementary) error function of each
// element of a matrix:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = erf( A ); // Computes the error function of each element
B = erfc( A ); // Computes the complementary error function of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operations_map map() / forEach()
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on matrices. The unary \c map() function can be used to apply a custom operation
// on each element of a dense or sparse matrix. For instance, the following example demonstrates
// a custom square root computation via a lambda:
\code
blaze::DynamicMatrix<double> A, B;
B = map( A, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense matrices. The following example demonstrates the merging of two matrices of double
// precision values into a matrix of double precision complex numbers:
\code
blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } };
blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } };
blaze::DynamicMatrix< complex<double> > cplx;
// Creating the matrix
// ( (-2.1, 0.3) (-4.2, -1.4) )
// ( ( 1.0, 2.9) ( 0.6, -3.4) )
   cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// Although the computation can be parallelized it is not vectorized and thus cannot perform at
// peak performance. However, it is also possible to create vectorized custom operations. See
// \ref custom_operations for a detailed overview of the possibilities of custom operations.
//
// Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in
// form of the \c forEach() function. With the introduction of binary custom functions, the
// \c forEach() function has been renamed to \c map(). The \c forEach() function can still be
// used (even for binary custom operations), but the function might be deprecated in future
// releases of \b Blaze.
//
//
// \n \section matrix_operations_norms Norms
//
// \n \subsection matrix_operations_norms_norm norm()
//
// The \c norm() function computes the L2 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
const double l2 = norm( A );
\endcode
// \n \subsection matrix_operations_norms_sqrnorm sqrNorm()
//
// The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
const double l2 = sqrNorm( A );
\endcode
// \n \subsection matrix_operations_norms_l1norm l1Norm()
//
// The \c l1Norm() function computes the L1 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
const double l1 = l1Norm( A );
\endcode
// \n \subsection matrix_operations_norms_l2norm l2Norm()
//
// The \c l2Norm() function computes the L2 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
const double l2 = l2Norm( A );
\endcode
// \n \subsection matrix_operations_norms_l3norm l3Norm()
//
// The \c l3Norm() function computes the L3 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
const double l3 = l3Norm( A );
\endcode
// \n \subsection matrix_operations_norms_lpnorm lpNorm()
//
// The \c lpNorm() function computes the general Lp norm of the given dense or sparse matrix,
// where the norm is specified by the runtime argument \a p:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
const double lp = lpNorm( A, 2.3 );
\endcode
// \n \subsection matrix_operations_norms_maxnorm maxNorm()
//
// The \c maxNorm() function computes the maximum norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
const double max = maxNorm( A );
\endcode
// \n \section matrix_operations_declaration_operations Declaration Operations
// <hr>
//
// \subsection matrix_operations_declsym declsym()
//
// The \c declsym() operation can be used to explicitly declare any matrix or matrix expression
// as symmetric:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declsym( A );
\endcode
// Any matrix or matrix expression that has been declared as symmetric via \c declsym() will
// gain all the benefits of a symmetric matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
DynamicMatrix<double> A, B, C;
SymmetricMatrix< DynamicMatrix<double> > S;
// ... Resizing and initialization
isSymmetric( declsym( A ) ); // Will always return true without runtime effort
S = declsym( A ); // Omit any runtime check for symmetry
C = declsym( A * B ); // Declare the result of the matrix multiplication as symmetric,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declsym() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-symmetric matrix or
// matrix expression as symmetric via the \c declsym() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declherm declherm()
//
// The \c declherm() operation can be used to explicitly declare any matrix or matrix expression
// as Hermitian:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declherm( A );
\endcode
// Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will
// gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
DynamicMatrix<double> A, B, C;
HermitianMatrix< DynamicMatrix<double> > S;
// ... Resizing and initialization
isHermitian( declherm( A ) ); // Will always return true without runtime effort
S = declherm( A ); // Omit any runtime check for Hermitian symmetry
C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declherm() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-Hermitian matrix or
// matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_decllow decllow()
//
// The \c decllow() operation can be used to explicitly declare any matrix or matrix expression
// as lower triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = decllow( A );
\endcode
// Any matrix or matrix expression that has been declared as lower triangular via \c decllow()
// will gain all the benefits of a lower triangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
DynamicMatrix<double> A, B, C;
LowerMatrix< DynamicMatrix<double> > L;
// ... Resizing and initialization
isLower( decllow( A ) ); // Will always return true without runtime effort
L = decllow( A ); // Omit any runtime check for A being a lower matrix
C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c decllow() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-lower matrix or
// matrix expression as lower triangular via the \c decllow() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declupp declupp()
//
// The \c declupp() operation can be used to explicitly declare any matrix or matrix expression
// as upper triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declupp( A );
\endcode
// Any matrix or matrix expression that has been declared as upper triangular via \c declupp()
// will gain all the benefits of an upper triangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::UpperMatrix;
DynamicMatrix<double> A, B, C;
UpperMatrix< DynamicMatrix<double> > U;
// ... Resizing and initialization
isUpper( declupp( A ) ); // Will always return true without runtime effort
   U = declupp( A );      // Omit any runtime check for A being an upper matrix
C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declupp() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-upper matrix or
// matrix expression as upper triangular via the \c declupp() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_decldiag decldiag()
//
// The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression
// as diagonal:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = decldiag( A );
\endcode
// Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will
// gain all the benefits of a diagonal matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::DiagonalMatrix;
DynamicMatrix<double> A, B, C;
DiagonalMatrix< DynamicMatrix<double> > D;
// ... Resizing and initialization
isDiagonal( decldiag( A ) ); // Will always return true without runtime effort
D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix
C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c decldiag() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-diagonal matrix
// or matrix expression as diagonal via the \c decldiag() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declid declid()
//
// The \c declid() operation can be used to explicitly declare any matrix or matrix expression
// as identity matrix:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declid( A );
\endcode
// Any matrix or matrix expression that has been declared as identity matrix via \c declid() will
// gain all the benefits of an identity matrix, which range from reduced runtime checking to a
// considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::DiagonalMatrix;
DynamicMatrix<double> A, B, C;
DiagonalMatrix< DynamicMatrix<double> > D;
// ... Resizing and initialization
isIdentity( declid( A ) ); // Will always return true without runtime effort
D = declid( A ); // Omit any runtime check for A being a diagonal matrix
C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an
// identity matrix, i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declid() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-identity matrix
// or matrix expression as identity matrix via the \c declid() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \section matrix_operations_matrix_inversion Matrix Inversion
// <hr>
//
// The inverse of a square dense matrix can be computed via the \c inv() function:
\code
blaze::DynamicMatrix<float,blaze::rowMajor> A, B;
// ... Resizing and initialization
B = inv( A ); // Compute the inverse of A
\endcode
// Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert()
// function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
invert( A ); // In-place matrix inversion
\endcode
// Both the \c inv() and the \c invert() functions will automatically select the most suited matrix
// inversion algorithm depending on the size and type of the given matrix. For small matrices of
// up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices
// larger than 6x6 the inversion is performed by means of the most suited matrix decomposition
// method: In case of a general matrix the LU decomposition is used, for symmetric matrices the
// LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and
// for triangular matrices the inverse is computed via a forward or back substitution.
//
// In case the type of the matrix does not provide additional compile time information about its
// structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually
// when calling the \c invert() function:
\code
using blaze::asGeneral;
using blaze::asSymmetric;
using blaze::asHermitian;
using blaze::asLower;
using blaze::asUniLower;
using blaze::asUpper;
using blaze::asUniUpper;
using blaze::asDiagonal;
invert<asGeneral> ( A ); // In-place inversion of a general matrix
invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix
invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix
invert<asLower> ( A ); // In-place inversion of a lower triangular matrix
invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix
   invert<asUpper>    ( A );  // In-place inversion of an upper triangular matrix
   invert<asUniUpper> ( A );  // In-place inversion of an upper unitriangular matrix
invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix
\endcode
// Alternatively, via the \c invert() function it is possible to explicitly specify the inversion
// algorithm:
\code
using blaze::byLU;
using blaze::byLDLT;
using blaze::byLDLH;
using blaze::byLLH;
// In-place inversion of a general matrix by means of an LU decomposition
invert<byLU>( A );
// In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLT>( A );
// In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLH>( A );
// In-place inversion of a positive definite matrix by means of a Cholesky decomposition
invert<byLLH>( A );
\endcode
// Whereas the inversion by means of an LU decomposition works for every general square matrix,
// the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is
// restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works
// for Hermitian positive definite matrices. Please note that it is in the responsibility of the
// function caller to guarantee that the selected algorithm is suited for the given matrix. In
// case this precondition is violated the result can be wrong and might not represent the inverse
// of the given matrix!
//
// For both the \c inv() and \c invert() function the matrix inversion fails if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// In all failure cases either a compilation error is created if the failure can be predicted at
// compile time or a \c std::invalid_argument exception is thrown.
//
// \note The matrix inversion can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if a fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \note It is not possible to use any kind of view on the expression object returned by the
// \c inv() function. Also, it is not possible to access individual elements via the function call
// operator on the expression object:
\code
row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression!
inv( A )(1,2); // Compilation error: It is not possible to access individual elements!
\endcode
// \note The inversion functions do not provide any exception safety guarantee, i.e. in case an
// exception is thrown the matrix may already have been modified.
//
//
// \n \section matrix_operations_decomposition Matrix Decomposition
// <hr>
//
// \note All decomposition functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if a fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \subsection matrix_operations_decomposition_lu LU Decomposition
//
// The LU decomposition of a dense matrix can be computed via the \c lu() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a row-major matrix
assert( A == L * U * P );
\endcode
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a column-major matrix
assert( A == P * L * U );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the
// three matrices \c A, \c L and \c U are required to have the same storage order. Also, please
// note that the way the permutation matrix \c P needs to be applied differs between row-major and
// column-major matrices, since the algorithm uses column interchanges for row-major matrices and
// row interchanges for column-major matrices.
//
// Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates
// the LU decomposition of a symmetric matrix into a lower and upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U;
blaze::DynamicMatrix<double,blaze::columnMajor> P;
lu( A, L, U, P ); // LU decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition
//
// The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
llh( A, L ); // LLH decomposition of a row-major matrix
assert( A == L * ctrans( L ) );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A
// and \c L can have any storage order.
//
// Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates
// the LLH decomposition of a symmetric matrix into a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
llh( A, L ); // Cholesky decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_qr QR Decomposition
//
// The QR decomposition of a dense matrix can be computed via the \c qr() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
blaze::DynamicMatrix<double,blaze::rowMajor> R;
qr( A, Q, R ); // QR decomposition of a row-major matrix
assert( A == Q * R );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c R can have any storage order.
//
// Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates
// the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R;
qr( A, Q, R ); // QR decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_rq RQ Decomposition
//
// Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via
// the \c rq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> R;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
rq( A, R, Q ); // RQ decomposition of a row-major matrix
assert( A == R * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c R and \c Q can have any storage order.
//
// Also the \c rq() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the RQ decomposition of an Hermitian matrix into a general
// matrix and an upper triangular matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
rq( A, R, Q ); // RQ decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_ql QL Decomposition
//
// The QL decomposition of a dense matrix can be computed via the \c ql() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::DynamicMatrix<double,blaze::columnMajor> L;
ql( A, Q, L ); // QL decomposition of a row-major matrix
assert( A == Q * L );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c L can have any storage order.
//
// Also the \c ql() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the QL decomposition of a symmetric matrix into a general
// matrix and a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
ql( A, Q, L ); // QL decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_lq LQ Decomposition
//
// The LQ decomposition of a dense matrix can be computed via the \c lq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
lq( A, L, Q ); // LQ decomposition of a row-major matrix
assert( A == L * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c L and \c Q can have any storage order.
//
// Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates
// the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
lq( A, L, Q ); // LQ decomposition of A
\endcode
// \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors
// <hr>
//
// The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions:
\code
namespace blaze {
template< typename MT, bool SO, typename VT, bool TF >
void eigen( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
void eigen( const DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& V );
} // namespace blaze
\endcode
// The first function computes only the eigenvalues of the given \a n-by-\a n matrix, the second
// function additionally computes the eigenvectors. The eigenvalues are returned in the given vector
// \a w and the eigenvectors are returned in the given matrix \a V, which are both resized to the
// correct dimensions (if possible and necessary).
//
// Depending on the given matrix type, the resulting eigenvalues are either of floating point
// or complex type: In case the given matrix is either a compile time symmetric matrix with
// floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues
// will be of floating point type and therefore the elements of the given eigenvalue vector are
// expected to be of floating point type. In all other cases they are expected to be of complex
// type. Please note that for complex eigenvalues no order of eigenvalues can be assumed, except
// that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having
// the positive imaginary part first.
//
// In case \a A is a row-major matrix, the left eigenvectors are returned in the rows of \a V,
// in case \a A is a column-major matrix, the right eigenvectors are returned in the columns of
// \a V. In case the given matrix is a compile time symmetric matrix with floating point elements,
// the resulting eigenvectors will be of floating point type and therefore the elements of the
// given eigenvector matrix are expected to be of floating point type. In all other cases they
// are expected to be of complex type.
//
// The following examples give an impression of the computation of eigenvalues and eigenvectors
// for a general, a symmetric, and an Hermitian matrix:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A
// ... Initialization
DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues
DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
eigen( A, w, V );
\endcode
\code
using blaze::SymmetricMatrix;
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL, 5UL ); // The symmetric matrix A
// ... Initialization
DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues
DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
eigen( A, w, V );
\endcode
\code
using blaze::HermitianMatrix;
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL, 5UL ); // The Hermitian matrix A
// ... Initialization
DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues
DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
eigen( A, w, V );
\endcode
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the eigenvalue computation fails.
//
// In all failure cases an exception is thrown.
//
// \note All \c eigen() functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of
// LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available
// and linked to the executable. Otherwise a linker error will be created.
//
//
// \n \section matrix_operations_singularvalues Singular Values/Singular Vectors
// <hr>
//
// The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd()
// functions:
\code
namespace blaze {
template< typename MT, bool SO, typename VT, bool TF >
void svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3 >
void svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3, typename ST >
size_t svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp );
} // namespace blaze
\endcode
// The first and third function compute only singular values of the given general \a m-by-\a n
// matrix, the second and fourth function additionally compute singular vectors. The resulting
// singular values are returned in the given vector \a s, the left singular vectors are returned
// in the given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s,
// \a U, and \a V are resized to the correct dimensions (if possible and necessary).
//
// The third and fourth function allow for the specification of a subset of singular values and/or
// vectors. The number of singular values and vectors to be computed is specified by the lower
// bound \a low and the upper bound \a upp, which either form an integral or a floating point
// range.
//
// In case \a low and \a upp are of integral type, the function computes all singular values
// in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored
// in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V,
// which is either resized (if possible) or expected to be a \a num-by-\a n matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all singular values
// in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are
// stored in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given
// matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n
// matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a U is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a s is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the given scalar values don't form a proper range;
// - ... the singular value decomposition fails.
//
// In all failure cases an exception is thrown.
//
// Examples:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A
// ... Initialization
DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors
DynamicVector<double,columnVector> s; // The vector for the singular values
DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors
svd( A, U, s, V );
\endcode
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A
// ... Initialization
DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors
DynamicVector<double,columnVector> s; // The vector for the singular values
DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors
svd( A, U, s, V, 0, 2 );
\endcode
// \note All \c svd() functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions compute the singular values and/or singular vectors of a dense matrix by
// means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is
// available and linked to the executable. Otherwise a linker error will be created.
//
//
// \n Previous: \ref matrix_types Next: \ref adaptors
*/
//*************************************************************************************************
//**Adaptors***************************************************************************************
/*!\page adaptors Adaptors
//
// \tableofcontents
//
//
// \section adaptors_general General Concepts
// <hr>
//
// Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the
// matrices such that certain invariants are preserved. Due to this adaptors can provide a compile
// time guarantee of certain properties, which can be exploited for optimized performance.
//
// The \b Blaze library provides a total of 9 different adaptors:
//
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices
// <ul>
// <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_lowermatrix </li>
// <li> \ref adaptors_triangular_matrices_unilowermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_uppermatrix </li>
// <li> \ref adaptors_triangular_matrices_uniuppermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Diagonal Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_diagonalmatrix </li>
// </ul>
// </li>
// </ul>
// </li>
// </ul>
//
// In combination with the general matrix types, \b Blaze provides a total of 40 different matrix
// types that make it possible to exactly adapt the type of matrix to every specific problem.
//
//
// \n \section adaptors_examples Examples
// <hr>
//
// The following code examples give an impression on the use of adaptors. The first example shows
// the multiplication between two lower matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant
// performance advantage in comparison to a general matrix multiplication, especially for large
// matrices.
//
// The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse
// vector multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which significantly increases the performance.
//
// \n Previous: \ref matrix_operations Next: \ref adaptors_symmetric_matrices
*/
//*************************************************************************************************
//**Symmetric Matrices*****************************************************************************
/*!\page adaptors_symmetric_matrices Symmetric Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_symmetric_matrices_general Symmetric Matrices
// <hr>
//
// In contrast to general matrices, which have no restriction in their number of rows and columns
// and whose elements can have any value, symmetric matrices provide the compile time guarantee
// to be square matrices with pair-wise identical values. Mathematically, this means that a
// symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal
// values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can
// be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze
// library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix
// class template.
//
//
// \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix
// <hr>
//
// The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it
// by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its
// transpose \f$ A = A^T \f$). It can be included via the header file
\code
#include <blaze/math/SymmetricMatrix.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
template< typename MT >
class SymmetricMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible symmetric matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense symmetric matrix with static memory
blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense symmetric matrix based on HybridMatrix
blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix
blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision symmetric matrix
blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E;
\endcode
// The storage order of a symmetric matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices
// <hr>
//
// A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the symmetry constraint:
//
// -# <b>\ref adaptors_symmetric_matrices_square</b>
// -# <b>\ref adaptors_symmetric_matrices_symmetry</b>
// -# <b>\ref adaptors_symmetric_matrices_initialization</b>
//
// \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 symmetric static matrix
SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced!
//
// This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that are
// symmetric themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, row-major 3x3 symmetric compressed matrix
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
// Initializing three elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
*A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from a symmetric dense matrix
StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 },
{ 8.0, 0.0, -1.0 },
{ -2.0, -1.0, 4.0 } };
SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-symmetric dense matrix
StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 },
{ 8.0, 0.0, -1.0 },
{ -2.0, -1.0, 4.0 } };
C = D; // Throws an exception; symmetric invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up a symmetric sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Setup of the symmetric matrix
//
// ( 0 1 3 )
// A = ( 1 2 0 )
// ( 3 0 0 )
//
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0)
A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1)
A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2)
\endcode
// The symmetry property is also enforced for symmetric custom matrices: In case the given array
// of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::SymmetricMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using CustomSymmetric = SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
// Creating a 3x3 symmetric custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomSymmetric A( array, 3UL ); // OK
// Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array
std::unique_ptr<double[]> memory( new double[9UL] );
CustomSymmetric B( memory.get(), 3UL ); // Throws an exception
\endcode
// Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the
// symmetric matrix. The following example demonstrates that modifying the elements of an entire
// row of the symmetric matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of the symmetric matrix
//
// ( 0 1 0 2 )
// A = ( 1 3 4 0 )
// ( 0 4 0 5 )
// ( 2 0 5 0 )
//
SymmetricMatrix< DynamicMatrix<int> > A( 4 );
A(0,1) = 1;
A(0,3) = 2;
A(1,1) = 3;
A(1,2) = 4;
A(2,3) = 5;
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( 0 0 0 2 )
// A = ( 0 0 0 0 )
// ( 0 0 0 5 )
// ( 2 0 5 0 )
//
row( A, 1 ) = 0;
\endcode
// The next example demonstrates the (compound) assignment to submatrices of symmetric matrices.
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( 1 2 )
// B = ( 3 4 )
// ( 5 6 )
//
DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the symmetric property of
// dense symmetric matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A SymmetricMatrix matrix can participate in numerical operations in any way any other dense
// or sparse matrix can participate. It can also be combined with any other dense or sparse vector
// or matrix. The following code example gives an impression of the use of SymmetricMatrix within
// arithmetic operations:
\code
using blaze::SymmetricMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<double,rowMajor> A( 3, 3 );
CompressedMatrix<double,rowMajor> B( 3, 3 );
SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 );
SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix
// to be assigned is not symmetric at compile time, a runtime check is performed.
//
//
// \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices
// <hr>
//
// It is also possible to use symmetric block matrices:
\code
using blaze::CompressedMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
// Definition of a 3x3 symmetric block matrix based on CompressedMatrix
SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 );
\endcode
// Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and
// guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also
// applied to element \f$ a_{ji} \f$:
\code
// Inserting the elements (2,4) and (4,2)
A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 },
{ 6, 8, -3 },
{ 2, -1, 2 } } );
// Manipulating the elements (2,4) and (4,2)
A(2,4)(1,1) = -5;
\endcode
// For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
//
//
// \n \section adaptors_symmetric_matrices_performance Performance Considerations
// <hr>
//
// When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. The \b Blaze library
// tries to exploit the properties of symmetric matrices whenever possible. However, there are
// also situations when using a symmetric matrix introduces some overhead. The following examples
// demonstrate several situations where symmetric matrices can positively or negatively impact
// performance.
//
// \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
SymmetricMatrix< CompressedMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited
// for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the
// SymmetricMatrix adapter is obviously an advantage.
//
// \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::columnMajor;
SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL );
auto row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix would be row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
//
// \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using a symmetric matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not symmetric at compile time:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
SymmetricMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the symmetric matrix; no performance penalty
C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead
C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead
\endcode
// When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary
// to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property
// of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as
// possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two symmetric matrices does not necessarily result in another symmetric matrix:
\code
SymmetricMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a symmetric matrix; no runtime overhead
C = A - B; // Results in a symmetric matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors Next: \ref adaptors_hermitian_matrices
*/
//*************************************************************************************************
//**Hermitian Matrices*****************************************************************************
/*!\page adaptors_hermitian_matrices Hermitian Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_hermitian_matrices_general Hermitian Matrices
// <hr>
//
// In addition to symmetric matrices, \b Blaze also provides an adaptor for Hermitian matrices.
// Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise
// conjugate complex values. Mathematically, this means that a Hermitian matrix is always equal
// to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have
// a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze
// library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix
// class template.
//
//
// \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix
// <hr>
//
// The HermitianMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to
// its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file
\code
#include <blaze/math/HermitianMatrix.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
template< typename MT >
class HermitianMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible Hermitian matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense Hermitian matrix with static memory
blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix
blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix
blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C;
// Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix
blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision complex Hermitian matrix
blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E;
\endcode
// The storage order of a Hermitian matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices
//
// The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits.
// However, there are a couple of differences, both from a mathematical point of view as well as
// from an implementation point of view.
//
// From a mathematical point of view, a matrix is called symmetric when it is equal to its
// transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate
// transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two
// conditions coincide, which means that symmetric matrices of real values are also Hermitian
// and Hermitian matrices of real values are also symmetric.
//
// From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data
// types (i.e. all integral types except \c bool, floating point and complex types), whereas
// symmetric matrices can also be block matrices (i.e. can have vector or matrix elements).
// For built-in element types, the HermitianMatrix adaptor behaves exactly like the according
// SymmetricMatrix implementation. For complex element types, however, the Hermitian property
// is enforced (see also \ref adaptors_hermitian_matrices_hermitian).
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::HermitianMatrix;
using blaze::SymmetricMatrix;
// The following two matrices provide an identical experience (including performance)
HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric
SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric
// The following two matrices will behave differently
HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian
SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric
// Hermitian block matrices are not allowed
HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error!
SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix
\endcode
// \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices
// <hr>
//
// A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the Hermitian symmetry constraint:
//
// -# <b>\ref adaptors_hermitian_matrices_square</b>
// -# <b>\ref adaptors_hermitian_matrices_hermitian</b>
// -# <b>\ref adaptors_hermitian_matrices_initialization</b>
//
// \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 Hermitian static matrix
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced!
//
// This means that the following properties of a Hermitian matrix are always guaranteed:
//
// - The diagonal elements are real numbers, i.e. the imaginary part is zero
// - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$
//
// Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that
// are Hermitian themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using cplx = std::complex<double>;
// Default constructed, row-major 3x3 Hermitian compressed matrix
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
// Initializing the matrix via the function call operator
//
// ( (1, 0) (0,0) (2,1) )
// ( (0, 0) (0,0) (0,0) )
// ( (2,-1) (0,0) (0,0) )
//
A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0)
A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
//
//  ( (1, 0) (0,0) (2, 1) )
// ( (0, 0) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
//
//  ( (1, 0) (8,1) (2, 1) )
// ( (8,-1) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
*A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1)
// Erasing elements via the erase() function
//
// ( (0, 0) (8,1) (0, 0) )
// ( (8,-1) (2,0) (4,-2) )
// ( (0, 0) (4,2) (0, 0) )
//
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from a Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) },
{ cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) },
{ cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } };
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B );  // OK
// Assignment of a non-Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) },
{ cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) },
{ cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } };
C = D; // Throws an exception; Hermitian invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up a Hermitian sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using cplx = std::complex<double>;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,2) (3,-4) )
// A = ( (1,-2) (2,0) (0, 0) )
// ( (3, 4) (0,0) (0, 0) )
//
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0)
A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1)
A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2)
\endcode
// The Hermitian property is also enforced for Hermitian custom matrices: In case the given array
// of elements does not represent a Hermitian matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::HermitianMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
// Creating a 3x3 Hermitian custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomHermitian A( array, 3UL ); // OK
// Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array
std::unique_ptr<double[]> memory( new double[9UL] );
CustomHermitian B( memory.get(), 3UL ); // Throws an exception
\endcode
// Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the
// Hermitian matrix. The following example demonstrates that modifying the elements of an entire
// row of the Hermitian matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using cplx = std::complex<double>;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,-1) (0,0) (2, 1) )
// A = ( (1, 1) (3, 0) (4,2) (0, 0) )
// ( (0, 0) (4,-2) (0,0) (5,-3) )
// ( (2,-1) (0, 0) (5,3) (0, 0) )
//
HermitianMatrix< DynamicMatrix<cplx> > A( 4 );
A(0,1) = cplx( 1.0, -1.0 );
A(0,3) = cplx( 2.0, 1.0 );
A(1,1) = cplx( 3.0, 0.0 );
A(1,2) = cplx( 4.0, 2.0 );
A(2,3) = cplx( 5.0, 3.0 );
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( (0, 0) (0,0) (0,0) (2, 1) )
// A = ( (0, 0) (0,0) (0,0) (0, 0) )
// ( (0, 0) (0,0) (0,0) (5,-3) )
// ( (2,-1) (0,0) (5,3) (0, 0) )
//
row( A, 1 ) = cplx( 0.0, 0.0 );
\endcode
// The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices.
// Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian
// symmetry of the matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using cplx = std::complex<double>;
// Setup of two default 4x4 Hermitian matrices
HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( (1,-1) (2, 5) )
// B = ( (3, 0) (4,-6) )
// ( (5, 0) (6, 0) )
//
DynamicMatrix<cplx> B( 3UL, 2UL );
B(0,0) = cplx( 1.0, -1.0 );
B(0,1) = cplx( 2.0, 5.0 );
B(1,0) = cplx( 3.0, 0.0 );
B(1,1) = cplx( 4.0, -6.0 );
B(2,0) = cplx( 5.0, 0.0 );
B(2,1) = cplx( 6.0, 0.0 );
// OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved
//
// ( (0, 0) (0, 0) (1,-1) (2, 5) )
// A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) )
// ( (1, 1) (3, 0) (5, 0) (6, 0) )
// ( (2,-5) (4, 6) (6, 0) (0, 0) )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( (0, 0) (1,-1) (2,5) (0,0) )
// A2 = ( (1, 1) (3, 0) (X,X) (0,0) )
// ( (2,-5) (X, X) (6,0) (0,0) )
// ( (0, 0) (0, 0) (0,0) (0,0) )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the Hermitian property of
// dense Hermitian matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A HermitianMatrix can be used within all numerical operations in any way any other dense or
// sparse matrix can be used. It can also be combined with any other dense or sparse vector or
// matrix. The following code example gives an impression of the use of HermitianMatrix within
// arithmetic operations:
\code
using blaze::HermitianMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
using cplx = std::complex<float>;
DynamicMatrix<cplx,rowMajor> A( 3, 3 );
CompressedMatrix<cplx,rowMajor> B( 3, 3 );
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 );
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 );
HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E;
HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a Hermitian matrix. In case the matrix
// to be assigned is not Hermitian at compile time, a runtime check is performed.
//
//
// \n \section adaptors_hermitian_matrices_performance Performance Considerations
// <hr>
//
// When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. This is particularly
// true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The
// \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever
// possible. However, there are also situations when using a Hermitian matrix introduces some
// overhead. The following examples demonstrate several situations where Hermitian matrices can
// positively or negatively impact performance.
//
// \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric
HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited
// for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a
// symmetric matrix is obviously an advantage.
//
// \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnVector;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::columnMajor;
HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both Hermitian and symmetric
auto row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix would be row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
//
// \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using a Hermitian matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not Hermitian at compile time:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
HermitianMatrix< DynamicMatrix< complex<double> > > A, C;
DynamicMatrix< complex<double> > B;
B = A; // Only read-access to the Hermitian matrix; no performance penalty
C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead
C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead
\endcode
// When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary
// to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property
// of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as
// possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix:
\code
HermitianMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a Hermitian matrix; no runtime overhead
C = A - B; // Results in a Hermitian matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors_symmetric_matrices Next: \ref adaptors_triangular_matrices
*/
//*************************************************************************************************
//**Triangular Matrices****************************************************************************
/*!\page adaptors_triangular_matrices Triangular Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_triangular_matrices_general Triangular Matrices
// <hr>
//
// Triangular matrices come in three flavors: Lower triangular matrices provide the compile time
// guarantee to be square matrices and that the upper part of the matrix contains only default
// elements that cannot be modified. Upper triangular matrices on the other hand provide the
// compile time guarantee to be square and that the lower part of the matrix contains only fixed
// default elements. Finally, diagonal matrices provide the compile time guarantee to be square
// and that both the lower and upper part of the matrix contain only immutable default elements.
// These properties can be exploited to gain higher performance and/or to save memory. Within the
// \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized
// by the following class templates:
//
// Lower triangular matrices:
// - <b>\ref adaptors_triangular_matrices_lowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_unilowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b>
//
// Upper triangular matrices:
// - <b>\ref adaptors_triangular_matrices_uppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b>
//
// Diagonal matrices
// - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b>
//
//
// \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix
// <hr>
//
// The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
l_{0,0} & 0 & 0 & \cdots & 0 \\
l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/LowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class LowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense lower matrix with static memory
blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense lower matrix based on HybridMatrix
blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense lower matrix based on DynamicMatrix
blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense lower matrix based on CustomMatrix
blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision lower matrix
blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E;
\endcode
// The storage order of a lower matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix
// <hr>
//
// The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements above the diagonal are 0 (lower unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 1 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UniLowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UniLowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower unitriangular matrices:
\code
// Definition of a 3x3 row-major dense unilower matrix with static memory
blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense unilower matrix based on HybridMatrix
blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense unilower matrix based on DynamicMatrix
blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision unilower matrix
blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a lower unitriangular matrix is depending on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix.
// Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the unilower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix
// <hr>
//
// The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements above the diagonal are 0 (strictly lower triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 0 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/StrictlyLowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class StrictlyLowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly lower triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly lower matrix with static memory
blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix
blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix
blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly lower matrix
blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly lower triangular matrix is depending on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix.
// Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly lower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix
// <hr>
//
// The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & u_{2,2} & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & u_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper matrices:
\code
// Definition of a 3x3 row-major dense upper matrix with static memory
blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense upper matrix based on HybridMatrix
blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense upper matrix based on DynamicMatrix
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision upper matrix
blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix
// <hr>
//
// The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements below the diagonal are 0 (upper unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 1 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 1 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UniUpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UniUpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper unitriangular matrices:
\code
// Definition of a 3x3 row-major dense uniupper matrix with static memory
blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense uniupper matrix based on HybridMatrix
blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix
blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision uniupper matrix
blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper unitriangular matrix is depending on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the uniupper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix
// <hr>
//
// The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements below the diagonal are 0 (strictly upper triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 0 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 0 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/StrictlyUpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class StrictlyUpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly upper triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly upper matrix with static memory
blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix
blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix
blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly upper matrix
blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly upper triangular matrix depends on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly upper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix
// <hr>
//
// The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all matrix elements above and below the diagonal
// are 0 (diagonal matrix):
\f[\left(\begin{array}{*{5}{c}}
l_{0,0} & 0 & 0 & \cdots & 0 \\
0 & l_{1,1} & 0 & \cdots & 0 \\
0 & 0 & l_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & l_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/DiagonalMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class DiagonalMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible diagonal matrices:
\code
// Definition of a 3x3 row-major dense diagonal matrix with static memory
blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense diagonal matrix based on HybridMatrix
blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix
blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision diagonal matrix
blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a diagonal matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices
// <hr>
//
// A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the triangular matrix constraint:
//
// -# <b>\ref adaptors_triangular_matrices_square</b>
// -# <b>\ref adaptors_triangular_matrices_triangular</b>
// -# <b>\ref adaptors_triangular_matrices_initialization</b>
// -# <b>\ref adaptors_triangular_matrices_storage</b>
// -# <b>\ref adaptors_triangular_matrices_scaling</b>
//
// \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 lower dynamic matrix
LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 lower static matrix
LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced!
//
// This means that it is only allowed to modify elements in the lower part or the diagonal of
// a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix.
// Unitriangular and strictly triangular matrices are even more restrictive and don't allow the
// modification of diagonal elements. Also, triangular matrices can only be assigned matrices that
// don't violate their triangular property. The following example demonstrates this restriction
// by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >;
// Default constructed, row-major 3x3 lower compressed matrix
CompressedLower A( 3 );
// Initializing elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(2,0) = 2.0; // Initialization of the lower element (2,0)
A(1,2) = 9.0; // Throws an exception; invalid modification of upper element
// Inserting two more elements via the insert() function
A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0)
A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1)
A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element
// Appending an element via the append() function
A.reserve( 1, 3 ); // Reserving enough capacity in row 1
A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1)
A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part
// Access via a non-const iterator
CompressedLower::Iterator it = A.begin(1);
*it = 6.0; // Modifies the lower element (1,0)
++it;
*it = 9.0; // Modifies the diagonal element (1,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 2, 0 ); // Erasing the lower element (2,0)
// Construction from a lower dense matrix
StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 },
{ 8.0, 0.0, 0.0 },
{ -2.0, -1.0, 4.0 } };
LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-lower dense matrix
StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 },
{ 8.0, 0.0, 0.0 },
{ -2.0, -1.0, 4.0 } };
C = D; // Throws an exception; lower matrix invariant would be violated!
\endcode
// The triangular property is also enforced during the construction of triangular custom matrices:
// In case the given array of elements does not represent the according triangular matrix type, a
// \c std::invalid_argument exception is thrown:
\code
using blaze::CustomMatrix;
using blaze::LowerMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using CustomLower = LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
// Creating a 3x3 lower custom matrix from a properly initialized array
double array[9] = { 1.0, 0.0, 0.0,
2.0, 3.0, 0.0,
4.0, 5.0, 6.0 };
CustomLower A( array, 3UL ); // OK
// Attempt to create a second 3x3 lower custom matrix from an uninitialized array
std::unique_ptr<double[]> memory( new double[9UL] );
CustomLower B( memory.get(), 3UL ); // Throws an exception
\endcode
// Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...)
// on the triangular matrix. The following example demonstrates that modifying the elements of an
// entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements.
// Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
// Setup of the lower matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 0 3 0 0 )
// ( 4 0 5 0 )
//
LowerMatrix< DynamicMatrix<int> > A( 4 );
A(1,0) = 1;
A(1,1) = 2;
A(2,1) = 3;
A(3,0) = 4;
A(3,2) = 5;
// Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 9 9 9 0 )
// ( 4 0 5 0 )
//
row( A, 2 ) = 9;
// Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in
//
// ( 0 0 0 0 )
// A = ( 1 7 0 0 )
// ( 9 7 7 0 )
// ( 4 7 7 0 )
//
submatrix( A, 0, 1, 4, 2 ) = 7;
\endcode
// The next example demonstrates the (compound) assignment to rows/columns and submatrices of
// triangular matrices. Since only lower/upper and potentially diagonal elements may be modified
// the matrix to be assigned must be structured such that the triangular matrix invariant of the
// matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::LowerMatrix;
using blaze::rowVector;
// Setup of two default 4x4 lower matrices
LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of a 4-dimensional vector
//
// v = ( 1 2 3 0 )
//
DynamicVector<int,rowVector> v{ 1, 2, 3, 0 };
// OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant
//
// ( 0 0 0 0 )
// A1 = ( 0 0 0 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 2 ) = v; // OK
// Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element
// marked with X cannot be assigned and triggers an exception.
//
// ( 0 0 0 0 )
// A1 = ( 1 2 X 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 1 ) = v; // Assignment throws an exception!
// Setup of the 3x2 dynamic matrix
//
// ( 0 0 )
// B = ( 7 0 )
// ( 8 9 )
//
DynamicMatrix<int> B( 3UL, 2UL, 0 );
B(1,0) = 7;
B(2,0) = 8;
B(2,1) = 9;
// OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved
//
// ( 0 0 0 0 )
// A2 = ( 0 7 0 0 )
// ( 0 8 9 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be
// preserved! The elements marked with X cannot be assigned without violating the invariant!
//
// ( 0 0 0 0 )
// A2 = ( 0 0 X 0 )
// ( 0 0 8 X )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency during the creation of a dense lower or
// upper matrix this initialization is important since otherwise the lower/upper matrix property
// of dense lower matrices would not be guaranteed:
\code
using blaze::DiagonalMatrix;
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// 5x5 row-major lower dynamic matrix with default initialized upper matrix
LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
// 7x7 column-major upper dynamic matrix with default initialized lower matrix
UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 );
// 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix
DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 );
\endcode
// \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements!
//
// All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable
// elements in the lower or upper part, respectively. Therefore dense triangular matrices don't
// provide any kind of memory reduction! There are two main reasons for this: First, storing also
// the zero elements guarantees maximum performance for many algorithms that perform vectorized
// operations on the triangular matrices, which is especially true for small dense matrices.
// Second, conceptually all triangular adaptors merely restrict the interface to the matrix type
// \c MT and do not change the data layout or the underlying matrix type.
//
// This property matters most for diagonal matrices. In order to achieve the perfect combination
// of performance and memory consumption for a diagonal matrix it is recommended to use dense
// matrices for small diagonal matrices and sparse matrices for large diagonal matrices:
\code
// Recommendation 1: use dense matrices for small diagonal matrices
using SmallDiagonalMatrix = blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> >;
// Recommendation 2: use sparse matrices for large diagonal matrices
using LargeDiagonalMatrix = blaze::DiagonalMatrix< blaze::CompressedMatrix<float> >;
\endcode
// \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled!
//
// Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible
// to self-scale such a matrix:
\code
using blaze::DynamicMatrix;
using blaze::UniLowerMatrix;
UniLowerMatrix< DynamicMatrix<int> > A( 4 );
A *= 2; // Compilation error; Scale operation is not available on an unilower matrix
A /= 2; // Compilation error; Scale operation is not available on an unilower matrix
A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix
A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix
A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix
\endcode
// \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A lower and upper triangular matrix can participate in numerical operations in any way any other
// dense or sparse matrix can participate. It can also be combined with any other dense or sparse
// vector or matrix. The following code example gives an impression of the use of blaze::LowerMatrix
// within arithmetic operations:
\code
using blaze::LowerMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<double,rowMajor> A( 3, 3 );
CompressedMatrix<double,rowMajor> B( 3, 3 );
LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 );
LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a triangular matrix. In case the
// matrix to be assigned does not satisfy the invariants of the triangular matrix at compile
// time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular
// and strictly triangular matrix types can be used in the same way, but may pose some additional
// restrictions (see the according class documentations).
//
//
// \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices
// <hr>
//
// It is also possible to use triangular block matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
// Definition of a 5x5 lower block matrix based on DynamicMatrix
LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Definition of a 7x7 upper block matrix based on CompressedMatrix
UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to
// manipulate elements in the upper part (lower triangular matrix) or the lower part (upper
// triangular matrix) of the matrix:
\code
const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 },
{ 6, 8, -3 },
{ 2, -1, 2 } };
A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception
B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception
\endcode
// Note that unitriangular matrices are restricted to numeric element types and therefore cannot
// be used for block matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::UniLowerMatrix;
using blaze::UniUpperMatrix;
// Compilation error: lower unitriangular matrices are restricted to numeric element types
UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Compilation error: upper unitriangular matrices are restricted to numeric element types
UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
//
//
// \n \section adaptors_triangular_matrices_performance Performance Considerations
// <hr>
//
// The \b Blaze library tries to exploit the properties of lower and upper triangular matrices
// whenever and wherever possible. Therefore using triangular matrices instead of a general
// matrices can result in a considerable performance improvement. However, there are also
// situations when using a triangular matrix introduces some overhead. The following examples
// demonstrate several common situations where triangular matrices can positively or negatively
// impact performance.
//
// \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. The following example demonstrates this by
// means of a dense matrix/dense matrix multiplication with lower triangular matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// In comparison to a general matrix multiplication, the performance advantage is significant,
// especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix
// and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular,
// respectively. Note however that the performance advantage is most pronounced for dense matrices
// and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar performance improvement can be gained when using a triangular matrix in a matrix/vector
// multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
DynamicVector<double,columnVector> x, y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example, \b Blaze also exploits the structure of the matrix and approx. halves the
// runtime of the multiplication. Also in case of matrix/vector multiplications the performance
// improvement is most pronounced for dense matrices and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for
// read access), which introduces absolutely no performance penalty, using a triangular matrix
// on the left-hand side of an assignment (i.e. for write access) may introduce additional
// overhead when it is assigned a general matrix, which is not triangular at compile time:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
LowerMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the lower matrix; no performance penalty
C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead
C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead
\endcode
// When assigning a general (potentially not lower triangular) matrix to a lower matrix or a
// general (potentially not upper triangular) matrix to an upper matrix it is necessary to check
// whether the matrix is lower or upper at runtime in order to guarantee the triangular property
// of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as
// efficiently as possible, if it is not, an exception is thrown. In order to prevent this runtime
// overhead it is therefore generally advisable to assign lower or upper triangular matrices to
// other lower or upper triangular matrices.\n
// In this context it is especially noteworthy that the addition, subtraction, and multiplication
// of two triangular matrices of the same structure always results in another triangular matrix:
\code
LowerMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a lower matrix; no runtime overhead
C = A - B; // Results in a lower matrix; no runtime overhead
C = A * B; // Results in a lower matrix; no runtime overhead
\endcode
\code
UpperMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in an upper matrix; no runtime overhead
C = A - B; // Results in an upper matrix; no runtime overhead
C = A * B; // Results in an upper matrix; no runtime overhead
\endcode
// \n Previous: \ref adaptors_hermitian_matrices Next: \ref views
*/
//*************************************************************************************************
//**Views******************************************************************************************
/*!\page views Views
//
// \tableofcontents
//
//
// \section views_general General Concepts
// <hr>
//
// Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific
// row, column, or band of a matrix. As such, views act as a reference to a specific part of a
// vector or matrix. This reference is valid and can be used in every way as any other vector
// or matrix can be used as long as the referenced vector or matrix is not resized or entirely
// destroyed. Views also act as alias to the elements of the vector or matrix: Changes made to the
// elements (e.g. modifying values, inserting or erasing elements) via the view are immediately
// visible in the vector or matrix and changes made via the vector or matrix are immediately
// visible in the view.
//
// The \b Blaze library provides the following views on vectors and matrices:
//
// Vector views:
// - \ref views_subvectors
// - \ref views_element_selections
//
// Matrix views:
// - \ref views_submatrices
// - \ref views_rows
// - \ref views_row_selections
// - \ref views_columns
// - \ref views_column_selections
// - \ref views_bands
//
//
// \n \section views_examples Examples
\code
using blaze::DynamicMatrix;
using blaze::StaticVector;
using blaze::rowVector;
// Setup of the 3x5 row-major matrix
//
// ( 1 0 -2 3 0 )
// ( 0 2 5 -1 -1 )
// ( 1 0 0 2 1 )
//
DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 },
{ 0, 2, 5, -1, -1 },
{ 1, 0, 0, 2, 1 } };
// Setup of the 2-dimensional row vector
//
// ( 18 19 )
//
StaticVector<int,2UL,rowVector> vec{ 18, 19 };
// Assigning to the elements (1,2) and (1,3) via a subvector of a row
//
// ( 1 0 -2 3 0 )
// ( 0 2 18 19 -1 )
// ( 1 0 0 2 1 )
//
subvector( row( A, 1UL ), 2UL, 2UL ) = vec;
\endcode
// \n Previous: \ref adaptors_triangular_matrices Next: \ref views_subvectors
*/
//*************************************************************************************************
//**Subvectors*************************************************************************************
/*!\page views_subvectors Subvectors
//
// \tableofcontents
//
//
// Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors
// act as a reference to a specific range within a vector. This reference is valid and can be
// used in every way any other dense or sparse vector can be used as long as the vector containing
// the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the
// vector elements in the specified range: Changes made to the elements (e.g. modifying values,
// inserting or erasing elements) are immediately visible in the vector and changes made via the
// vector are immediately visible in the subvector.
//
//
// \n \section views_subvectors_setup Setup of Subvectors
// <hr>
//
// A view on a dense or sparse subvector can be created very conveniently via the \c subvector()
// function. It can be included via the header file
\code
#include <blaze/math/Subvector.h>
\endcode
// The first parameter specifies the offset of the subvector within the underlying dense or sparse
// vector, the second parameter specifies the size of the subvector. The two parameters can be
// specified either at compile time or at runtime:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Create a subvector from index 4 with a size of 12 (i.e. in the range [4..15]) (compile time arguments)
auto sv1 = subvector<4UL,12UL>( x );
// Create a subvector from index 8 with a size of 16 (i.e. in the range [8..23]) (runtime arguments)
auto sv2 = subvector( x, 8UL, 16UL );
\endcode
// The \c subvector() function returns an expression representing the subvector view. The type of
// this expression depends on the given subvector arguments, primarily the type of the vector and
// the compile time arguments. If the type is required, it can be determined via \c decltype or
// via the \c SubvectorExprTrait class template:
\code
using VectorType = blaze::DynamicVector<int>;
using SubvectorType1 = decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) );
using SubvectorType2 = blaze::SubvectorExprTrait<VectorType,4UL,12UL>::Type;
\endcode
// The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. A subvector created
// from a row vector can be used as any other row vector, a subvector created from a column vector
// can be used as any other column vector. The view can also be used on both sides of an assignment:
// The subvector can either be used as an alias to grant write access to a specific subvector of a
// vector primitive on the left-hand side of an assignment or to grant read-access to a specific
// subvector of a vector primitive or expression on the right-hand side of an assignment. The
// following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Create a subvector from index 0 with a size of 10 (i.e. in the range [0..9])
auto sv = subvector( x, 0UL, 10UL );
// Setting the first ten elements of x to the 2nd row of matrix A
sv = row( A, 2UL );
// Setting the second ten elements of x to y
subvector( x, 10UL, 10UL ) = y;
// Setting the 3rd row of A to a subvector of x
row( A, 3UL ) = subvector( x, 3UL, 10UL );
// Setting x to a subvector of the result of the addition between y and the 1st row of A
x = subvector( y + row( A, 1UL ), 2UL, 5UL );
\endcode
// \n \section views_subvectors_element_access Element Access
// <hr>
//
// The elements of a subvector can be directly accessed via the subscript operator:
\code
blaze::DynamicVector<double,blaze::rowVector> v;
// ... Resizing and initialization
// Creating an 8-dimensional subvector, starting from index 4
auto sv = subvector( v, 4UL, 8UL );
// Setting the 1st element of the subvector, which corresponds to
// the element at index 5 in vector v
sv[1] = 2.0;
\endcode
// The numbering of the subvector elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the specified size of the subvector. Alternatively, the elements of a subvector can
// be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin()
// and \c end() return an iterator, which allows to manipulate the elements, in case of constant
// subvectors an iterator to immutable elements is returned:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of vector v
auto sv = subvector( v, 16UL, 64UL );
// Traversing the elements via iterators to non-const elements
for( auto it=sv.begin(); it!=sv.end(); ++it ) {
*it = ...; // OK: Write access to the dense subvector value.
... = *it; // OK: Read access to the dense subvector value.
}
// Traversing the elements via iterators to const elements
for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense subvector value.
}
\endcode
\code
blaze::CompressedVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of vector v
auto sv = subvector( v, 16UL, 64UL );
// Traversing the elements via iterators to non-const elements
for( auto it=sv.begin(); it!=sv.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_subvectors_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse subvector can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256
auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v
// The subscript operator provides access to all possible elements of the sparse subvector,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse subvector, the element is inserted into the
// subvector.
sv[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the subvector it is inserted into the subvector, if it is already contained
// in the subvector its value is modified.
sv.set( 45UL, -1.2 );
// An alternative for inserting elements into the subvector is the insert() function. However,
// it inserts the element only in case the element is not already contained in the subvector.
sv.insert( 50UL, 3.7 );
// Just as in case of vectors, elements can also be inserted via the append() function. In
// case of subvectors, append() also requires that the appended element's index is strictly
// larger than the currently largest non-zero index of the subvector and that the subvector's
// capacity is large enough to hold the new element. Note however that due to the nature of
// a subvector, which may be an alias to the middle of a sparse vector, the append() function
// does not work as efficiently for a subvector as it does for a vector.
sv.reserve( 10UL );
sv.append( 51UL, -2.1 );
\endcode
// \n \section views_subvectors_common_operations Common Operations
// <hr>
//
// A subvector view can be used like any other dense or sparse vector. This means that with
// only a few exceptions all \ref vector_operations and \ref arithmetic_operations can be used.
// For instance, the current number of elements can be obtained via the \c size() function, the
// current capacity via the \c capacity() function, and the number of non-zero elements via the
// \c nonZeros() function. However, since subvectors are references to a specific range of a
// vector, several operations are not possible, such as resizing and swapping. The following
// example shows this by means of a dense subvector view:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 42UL );
// ... Resizing and initialization
// Creating a view on the range [5..15] of vector v
auto sv = subvector( v, 5UL, 10UL );
sv.size(); // Returns the number of elements in the subvector
sv.capacity(); // Returns the capacity of the subvector
sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector
sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector
auto sv2 = subvector( v, 15UL, 10UL );
swap( sv, sv2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_subvectors_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse subvectors can be used in all arithmetic operations that any other dense
// or sparse vector can be used in. The following example gives an impression of the use of dense
// subvectors within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3;
blaze::CompressedVector<double,blaze::rowVector> s1, s2;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> A;
auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1
sv = d2; // Dense vector initialization of the range [0..9]
subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19]
d3 = sv + d2; // Dense vector/dense vector addition
s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition
d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication
subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6]
d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9]
d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9]
subvector( d1, 0UL , 10UL ) += d2; // Addition assignment
subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment
subvector( d1, 20UL, 10UL ) *= sv; // Multiplication assignment
double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors
A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors
\endcode
// \n \section views_aligned_subvectors Aligned Subvectors
// <hr>
//
// Usually subvectors can be defined anywhere within a vector. They may start at any position and
// may have an arbitrary size (only restricted by the size of the underlying vector). However, in
// contrast to vectors themselves, which are always properly aligned in memory and therefore can
// provide maximum performance, this means that subvectors in general have to be considered to be
// unaligned. This can be made explicit by the \c blaze::unaligned flag:
\code
using blaze::unaligned;
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Identical creations of an unaligned subvector in the range [8..23]
auto sv1 = subvector ( x, 8UL, 16UL );
auto sv2 = subvector<unaligned>( x, 8UL, 16UL );
auto sv3 = subvector<8UL,16UL> ( x );
auto sv4 = subvector<unaligned,8UL,16UL>( x );
\endcode
// All of these calls to the \c subvector() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide
// full flexibility in the creation of subvectors, this might result in performance disadvantages
// in comparison to vector primitives (even in case the specified subvector could be aligned).
// Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a vector might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned subvectors. Aligned subvectors are identical to
// unaligned subvectors in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying vector. Aligned subvectors are created by
// explicitly specifying the \c blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned subvector in the range [8..23]
auto sv1 = subvector<aligned>( x, 8UL, 16UL );
auto sv2 = subvector<aligned,8UL,16UL>( x );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of the subvector must be aligned. The following source code gives some examples
// for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double
// values into a SIMD vector:
\code
using blaze::aligned;
blaze::DynamicVector<double,blaze::columnVector> d( 17UL );
// ... Resizing and initialization
// OK: Starts at the beginning, i.e. the first element is aligned
auto dsv1 = subvector<aligned>( d, 0UL, 13UL );
// OK: Start index is a multiple of 4, i.e. the first element is aligned
auto dsv2 = subvector<aligned>( d, 4UL, 7UL );
// OK: The start index is a multiple of 4 and the subvector includes the last element
auto dsv3 = subvector<aligned>( d, 8UL, 9UL );
// Error: Start index is not a multiple of 4, i.e. the first element is not aligned
auto dsv4 = subvector<aligned>( d, 5UL, 8UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense subvectors.
// In contrast, aligned sparse subvectors at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case
// the \c blaze::aligned flag is specified during setup, an aligned subvector is created:
\code
using blaze::aligned;
blaze::CompressedVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Creating an aligned subvector in the range [8..23]
auto sv1 = subvector<aligned>( x, 8UL, 16UL );
auto sv2 = subvector<aligned,8UL,16UL>( x );
\endcode
// \n Previous: \ref views Next: \ref views_element_selections
*/
//*************************************************************************************************
//**Element Selections*****************************************************************************
/*!\page views_element_selections Element Selections
//
// \tableofcontents
//
//
// Element selections provide views on arbitrary compositions of elements of dense and sparse
// vectors. These views act as a reference to the selected elements and represent them as another
// dense or sparse vector. This reference is valid and can be used in every way any other dense
// or sparse vector can be used as long as the vector containing the elements is not resized or
// entirely destroyed. The element selection also acts as an alias to the vector elements in the
// specified range: Changes made to the elements (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the vector and changes made via the vector are immediately
// visible in the elements.
//
//
// \n \section views_element_selections_setup Setup of Element Selections
//
// An element selection can be created very conveniently via the \c elements() function. It can
// be included via the header file
\code
#include <blaze/math/Elements.h>
\endcode
// The indices of the elements to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):
\code
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Selecting the elements 4, 6, 8, and 10 (compile time arguments)
auto e1 = elements<4UL,6UL,8UL,10UL>( x );
// Selecting the elements 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto e2 = elements( x, { 3UL, 2UL, 1UL } );
auto e3 = elements( x, list );
// Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto e4 = elements( x, array );
auto e5 = elements( x, array.data(), array.size() );
// Selecting the element 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto e6 = elements( x, vector );
auto e7 = elements( x, vector.data(), vector.size() );
\endcode
// Note that it is possible to alias the elements of the underlying vector in any order. Also note
// that it is possible to use the same index multiple times. The \c elements() function returns an
// expression representing the view on the selected elements. The type of this expression depends
// on the given arguments, primarily the type of the vector and the compile time arguments. If the
// type is required, it can be determined via \c decltype or via the \c ElementsExprTrait class
// template:
\code
using VectorType = blaze::DynamicVector<int>;
using ElementsType1 = decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) );
using ElementsType2 = blaze::ElementsExprTrait<VectorType,4UL,12UL>::Type;
\endcode
// The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. An element selection
// created from a row vector can be used as any other row vector, an element selection created
// from a column vector can be used as any other column vector. The view can also be used on both
// sides of an assignment: It can either be used as an alias to grant write access to specific
// elements of a vector primitive on the left-hand side of an assignment or to grant read-access
// to specific elements of a vector primitive or expression on the right-hand side of an assignment.
// The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Selecting the elements 1, 3, 5, and 7
auto e = elements( x, { 1UL, 3UL, 5UL, 7UL } );
// Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A
e = row( A, 2UL );
// Setting the elements 2, 4, 6, and 8 of x to y
elements( x, { 2UL, 4UL, 6UL, 8UL } ) = y;
// Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x
row( A, 3UL ) = elements( x, { 5UL, 4UL, 3UL, 2UL } );
// Rotating the result of the addition between y and the 1st row of A
x = elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } );
\endcode
// Please note that using an element selection, which refers to an index multiple times, on the
// left-hand side of an assignment leads to undefined behavior:
\code
blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 };
blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 };
auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four times
e = b; // Undefined behavior
\endcode
// In this example both vectors have the same size, which results in a correct vector assignment,
// but the final value of the element at index 1 is unspecified.
//
//
// \n \section views_element_selections_element_access Element Access
//
// The elements of an element selection can be directly accessed via the subscript operator:
\code
blaze::DynamicVector<double,blaze::rowVector> v;
// ... Resizing and initialization
// Selecting the elements 2, 4, 6, and 8
auto e = elements( v, { 2UL, 4UL, 6UL, 8UL } );
// Setting the 1st element of the element selection, which corresponds to
// the element at index 4 in vector v
e[1] = 2.0;
\endcode
// The numbering of the selected elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of selected elements. Alternatively, the elements of an element selection
// can be traversed via iterators. Just as with vectors, in case of non-const element selections,
// \c begin() and \c end() return an iterator, which allows to manipulate the elements, in case of
// constant element selections an iterator to immutable elements is returned:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating an element selection including specific elements of dense vector v
auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } );
// Traversing the elements via iterators to non-const elements
for( auto it=e.begin(); it!=e.end(); ++it ) {
*it = ...; // OK: Write access to the dense vector value.
... = *it; // OK: Read access to the dense vector value.
}
// Traversing the elements via iterators to const elements
for( auto it=e.cbegin(); it!=e.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense vector value.
}
\endcode
\code
blaze::CompressedVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating an element selection including specific elements of sparse vector v
auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } );
// Traversing the elements via iterators to non-const elements
for( auto it=e.begin(); it!=e.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=e.cbegin(); it!=e.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_element_selections_element_insertion Element Insertion
//
// Inserting/accessing elements in a sparse element selection can be done by several alternative
// functions. The following example demonstrates all options:
\code
blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256
std::vector<size_t> indices;
// ... Selecting indices of the sparse vector
auto e = elements( v, indices );
// The subscript operator provides access to the selected elements of the sparse vector,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse vector, the element is inserted.
e[42] = 2.0;
// The second operation for inserting elements via the element selection is the set() function.
// In case the element is not contained in the vector it is inserted into the vector, if it is
// already contained in the vector its value is modified.
e.set( 45UL, -1.2 );
// An alternative for inserting elements into the vector is the insert() function. However, it
// inserts the element only in case the element is not already contained in the vector.
e.insert( 50UL, 3.7 );
// Just as in case of vectors, elements can also be inserted via the append() function. In case
// of element selections, append() also requires that the appended element's index is strictly
// larger than the currently largest non-zero index of the selection and that the selections's
// capacity is large enough to hold the new element. Note however that due to the nature of an
// element selection, which is an alias to arbitrary elements of a sparse vector, the append()
// function does not work as efficiently for an element selection as it does for a vector.
e.reserve( 10UL );
e.append( 51UL, -2.1 );
\endcode
// \n \section views_element_selections_common_operations Common Operations
//
// An element selection can be used like any other dense or sparse vector. For instance, the
// number of selected elements can be obtained via the \c size() function, the current capacity
// via the \c capacity() function, and the number of non-zero elements via the \c nonZeros()
// function. However, since element selections are references to a specific range of a vector,
// several operations are not possible, such as resizing and swapping. The following example
// shows this by means of an element selection on a dense vector:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 42UL );
// ... Resizing and initialization
// Selecting the elements 5 and 10
auto e = elements( v, { 5UL, 10UL } );
e.size(); // Returns the number of elements in the element selection
e.capacity(); // Returns the capacity of the element selection
e.nonZeros(); // Returns the number of non-zero elements contained in the element selection
e.resize( 84UL ); // Compilation error: Cannot resize an element selection
auto e2 = elements( v, { 15UL, 10UL } );
swap( e, e2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_element_selections_arithmetic_operations Arithmetic Operations
//
// Both dense and sparse element selections can be used in all arithmetic operations that any other
// dense or sparse vector can be used in. The following example gives an impression of the use of
// dense element selections within arithmetic operations. All operations (addition, subtraction,
// multiplication, scaling, ...) can be performed on all possible combinations of dense and sparse
// element selections with fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3;
blaze::CompressedVector<double,blaze::rowVector> s1, s2;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> A;
std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };
auto e( elements( d1, indices1 ) ); // Selecting every third element of d1 in the range [0..21]
e = d2; // Dense vector assignment to the selected elements
elements( d1, indices2 ) = s1; // Sparse vector assignment to the selected elements
d3 = e + d2; // Dense vector/dense vector addition
s2 = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition
d2 = e * elements( d1, indices3 ); // Component-wise vector multiplication
elements( d1, indices2 ) *= 2.0; // In-place scaling of the second selection of elements
d2 = elements( d1, indices3 ) * 2.0; // Scaling of the elements in the third selection of elements
d2 = 2.0 * elements( d1, indices3 ); // Scaling of the elements in the third selection of elements
elements( d1, indices1 ) += d2; // Addition assignment
elements( d1, indices2 ) -= s2; // Subtraction assignment
elements( d1, indices3 ) *= e; // Multiplication assignment
double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner product between two vectors
A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two vectors
\endcode
// \n Previous: \ref views_subvectors Next: \ref views_submatrices
*/
//*************************************************************************************************
//**Submatrices************************************************************************************
/*!\page views_submatrices Submatrices
//
// \tableofcontents
//
//
// Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors
// provide views on specific parts of vectors. As such, submatrices act as a reference to a
// specific block within a matrix. This reference is valid and can be used in every way any
// other dense or sparse matrix can be used as long as the matrix containing the submatrix is
// not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements
// in the specified block: Changes made to the elements (e.g. modifying values, inserting or
// erasing elements) are immediately visible in the matrix and changes made via the matrix are
// immediately visible in the submatrix.
//
//
// \n \section views_submatrices_setup Setup of Submatrices
// <hr>
//
// A view on a dense or sparse submatrix can be created very conveniently via the \c submatrix()
// function. It can be included via the header file
\code
#include <blaze/math/Submatrix.h>
\endcode
// The first and second parameter specify the row and column of the first element of the submatrix.
// The third and fourth parameter specify the number of rows and columns, respectively. The four
// parameters can be specified either at compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a dense submatrix of size 4x8, starting in row 3 and column 0 (compile time arguments)
auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A );
// Creating a dense submatrix of size 8x16, starting in row 0 and column 4 (runtime arguments)
auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL );
\endcode
// The \c submatrix() function returns an expression representing the submatrix view. The type of
// this expression depends on the given submatrix arguments, primarily the type of the matrix and
// the compile time arguments. If the type is required, it can be determined via \c decltype or
// via the \c SubmatrixExprTrait class template:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using SubmatrixType1 = decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
using SubmatrixType2 = blaze::SubmatrixExprTrait<MatrixType,3UL,0UL,4UL,8UL>::Type;
\endcode
// The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. A submatrix created from
// a row-major matrix will itself be a row-major matrix, a submatrix created from a column-major
// matrix will be a column-major matrix. The view can also be used on both sides of an assignment:
// The submatrix can either be used as an alias to grant write access to a specific submatrix
// of a matrix primitive on the left-hand side of an assignment or to grant read-access to
// a specific submatrix of a matrix primitive or expression on the right-hand side of an
// assignment. The following example demonstrates this in detail:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A, B;
blaze::CompressedMatrix<double,blaze::rowMajor> C;
// ... Resizing and initialization
// Creating a dense submatrix of size 8x4, starting in row 0 and column 2
auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL );
// Setting the submatrix of A to a 8x4 submatrix of B
sm = submatrix( B, 0UL, 0UL, 8UL, 4UL );
// Copying the sparse matrix C into another 8x4 submatrix of A
submatrix( A, 8UL, 2UL, 8UL, 4UL ) = C;
// Assigning part of the result of a matrix addition to the first submatrix
sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL );
\endcode
// \n \section views_submatrices_element_access Element Access
// <hr>
//
// The elements of a submatrix can be directly accessed with the function call operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a 8x8 submatrix, starting from position (4,4)
auto sm = submatrix( A, 4UL, 4UL, 8UL, 8UL );
// Setting the element (0,0) of the submatrix, which corresponds to
// the element at position (4,4) in matrix A
sm(0,0) = 2.0;
\endcode
// Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as
// with matrices, in case of non-const submatrices, \c begin() and \c end() return an iterator,
// which allows to manipulate the elements, in case of constant submatrices an iterator to
// immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of matrix A
auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=sm.begin(0); it!=sm.end(0); ++it ) {
*it = ...; // OK: Write access to the dense submatrix value.
... = *it; // OK: Read access to the dense submatrix value.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense submatrix value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of matrix A
auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=sm.begin(0); it!=sm.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_submatrices_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse submatrix can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512
auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A
// The function call operator provides access to all possible elements of the sparse submatrix,
// including the zero elements. In case the function call operator is used to access an element
// that is currently not stored in the sparse submatrix, the element is inserted into the
// submatrix.
sm(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the submatrix it is inserted into the submatrix, if it is already contained
// in the submatrix its value is modified.
sm.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the submatrix is the insert() function. However,
// it inserts the element only in case the element is not already contained in the submatrix.
sm.insert( 2UL, 6UL, 3.7 );
// Just as in the case of sparse matrices, elements can also be inserted via the append()
// function. In case of submatrices, append() also requires that the appended element's
// index is strictly larger than the currently largest non-zero index in the according row
// or column of the submatrix and that the according row's or column's capacity is large
// enough to hold the new element. Note however that due to the nature of a submatrix, which
// may be an alias to the middle of a sparse matrix, the append() function does not work as
// efficiently for a submatrix as it does for a matrix.
sm.reserve( 2UL, 10UL );
sm.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_submatrices_common_operations Common Operations
// <hr>
//
// A submatrix view can be used like any other dense or sparse matrix. This means that with only
// a few exceptions all \ref matrix_operations and \ref arithmetic_operations can be used. For
// instance, the current size of the matrix, i.e. the number of rows or columns can be obtained
// via the \c rows() and \c columns() functions, the current total capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// submatrices are views on a specific submatrix of a matrix, several operations are not possible,
// such as resizing and swapping:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a view on an 8x12 submatrix of matrix A
auto sm = submatrix( A, 0UL, 0UL, 8UL, 12UL );
sm.rows(); // Returns the number of rows of the submatrix
sm.columns(); // Returns the number of columns of the submatrix
sm.capacity(); // Returns the capacity of the submatrix
sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix
sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix
auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL );
swap( sm, sm2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_submatrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse submatrices can be used in all arithmetic operations that any other dense
// or sparse matrix can be used in. The following example gives an impression of the use of dense
// submatrices within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse matrices with
// fitting element types:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3;
blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2;
blaze::CompressedVector<double,blaze::columnVector> a, b;
// ... Resizing and initialization
auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1
// starting from row 0 and column 0
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix
// starting in row 0 and column 8
sm = S1;                                          // Sparse matrix initialization of the first 8x8 submatrix
D3 = sm + D2; // Dense matrix/dense matrix addition
S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction
D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1
D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0;  // Scaling of a submatrix of D1
D2 = 2.0 * sm;                                   // Scaling of a submatrix of D1
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment
submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment
a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_aligned_submatrices Aligned Submatrices
// <hr>
//
// Usually submatrices can be defined anywhere within a matrix. They may start at any position and
// may have an arbitrary extension (only restricted by the extension of the underlying matrix).
// However, in contrast to matrices themselves, which are always properly aligned in memory and
// therefore can provide maximum performance, this means that submatrices in general have to be
// considered to be unaligned. This can be made explicit by the \c blaze::unaligned flag:
\code
using blaze::unaligned;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0
auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL );
auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL );
auto sm3 = submatrix<0UL,0UL,8UL,8UL> ( A );
auto sm4 = submatrix<unaligned,0UL,0UL,8UL,8UL>( A );
\endcode
// All of these calls to the \c submatrix() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide
// full flexibility in the creation of submatrices, this might result in performance disadvantages
// in comparison to matrix primitives (even in case the specified submatrix could be aligned).
// Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a matrix might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned submatrices. Aligned submatrices are identical to
// unaligned submatrices in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying matrix. Aligned submatrices are created by
// explicitly specifying the \c blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
auto sv2 = submatrix<aligned,0UL,0UL,8UL,8UL>( A );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of each row/column of the submatrix must be aligned. The following source code
// gives some examples for a double precision row-major dynamic matrix, assuming that padding is
// enabled and that AVX is available, which packs 4 \c double values into a SIMD vector:
\code
using blaze::aligned;
blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL );
// ... Resizing and initialization
// OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding)
auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL );
// OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding)
auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL );
// OK: First column is a multiple of 4 and the submatrix includes the last row and column
auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL );
// Error: First column is not a multiple of 4, i.e. the first element is not aligned
auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense submatrices.
// In contrast, aligned sparse submatrices at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case
// the \c blaze::aligned flag is specified during setup, an aligned submatrix is created:
\code
using blaze::aligned;
blaze::CompressedMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
\endcode
// \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices
//
// Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of a 16x16 symmetric matrix
SymmetricMatrix< DynamicMatrix<int> > A( 16UL );
// Creating a dense submatrix of size 8x12, starting in row 2 and column 4
auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL );
\endcode
// It is important to note, however, that (compound) assignments to such submatrices have a
// special restriction: The symmetry of the underlying symmetric matrix must not be broken!
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \a std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( 1 2 )
// B = ( 3 4 )
// ( 5 6 )
//
DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n Previous: \ref views_element_selections Next: \ref views_rows
*/
//*************************************************************************************************
//**Rows*******************************************************************************************
/*!\page views_rows Rows
//
// \tableofcontents
//
//
// Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a
// reference to a specific row. This reference is valid and can be used in every way any other
// row vector can be used as long as the matrix containing the row is not resized or entirely
// destroyed. The row also acts as an alias to the row elements: Changes made to the elements
// (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix
// and changes made via the matrix are immediately visible in the row.
//
//
// \n \section views_rows_setup Setup of Rows
// <hr>
//
// \image html row.png
// \image latex row.eps "Row view" width=250pt
//
// A reference to a dense or sparse row can be created very conveniently via the \c row() function.
// It can be included via the header file
\code
#include <blaze/math/Row.h>
\endcode
// The row index must be in the range from \f$[0..M-1]\f$, where \c M is the total number of rows
// of the matrix, and can be specified both at compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a reference to the 1st row of matrix A (compile time index)
auto row1 = row<1UL>( A );
// Creating a reference to the 2nd row of matrix A (runtime index)
auto row2 = row( A, 2UL );
\endcode
// The \c row() function returns an expression representing the row view. The type of this
// expression depends on the given row arguments, primarily the type of the matrix and the compile
// time arguments. If the type is required, it can be determined via \c decltype or via the
// \c RowExprTrait class template:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using RowType1 = decltype( blaze::row<1UL>( std::declval<MatrixType>() ) );
using RowType2 = blaze::RowExprTrait<MatrixType,1UL>::Type;
\endcode
// The resulting view can be treated as any other row vector, i.e. it can be assigned to, it can
// be copied from, and it can be used in arithmetic operations. The reference can also be used on
// both sides of an assignment: The row can either be used as an alias to grant write access to a
// specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access
// to a specific row of a matrix primitive or expression on the right-hand side of an assignment.
// The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A, B;
blaze::CompressedMatrix<double,blaze::rowMajor> C, D;
// ... Resizing and initialization
// Setting the 2nd row of matrix A to x
auto row2 = row( A, 2UL );
row2 = x;
// Setting the 3rd row of matrix B to y
row( B, 3UL ) = y;
// Setting x to the 4th row of the result of the matrix multiplication
x = row( A * B, 4UL );
// Setting y to the 2nd row of the result of the sparse matrix multiplication
y = row( C * D, 2UL );
\endcode
// \n \section views_rows_element_access Element Access
// <hr>
//
// The elements of a row can be directly accessed with the subscript operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a view on the 4th row of matrix A
auto row4 = row( A, 4UL );
// Setting the 1st element of the dense row, which corresponds
// to the 1st element in the 4th row of matrix A
row4[1] = 2.0;
\endcode
// The numbering of the row elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of columns of the referenced matrix. Alternatively, the elements of a
// row can be traversed via iterators. Just as with vectors, in case of non-const rows, \c begin()
// and \c end() return an iterator, which allows to manipulate the elements, in case of constant
// rows an iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
auto row31 = row( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=row31.begin(); it!=row31.end(); ++it ) {
*it = ...; // OK: Write access to the dense row value.
... = *it; // OK: Read access to the dense row value.
}
// Traversing the elements via iterators to const elements
for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense row value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
auto row31 = row( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=row31.begin(); it!=row31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_rows_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse row can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix
auto row0( row( A, 0UL ) ); // Reference to the 0th row of A
// The subscript operator provides access to all possible elements of the sparse row,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse row, the element is inserted into the row.
row0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the row it is inserted into the row, if it is already contained in
// the row its value is modified.
row0.set( 45UL, -1.2 );
// An alternative for inserting elements into the row is the insert() function. However,
// it inserts the element only in case the element is not already contained in the row.
row0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse row is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the row and that the row's capacity is large
// enough to hold the new element.
row0.reserve( 10UL );
row0.append( 51UL, -2.1 );
\endcode
// \n \section views_rows_common_operations Common Operations
// <hr>
//
// A row view can be used like any other row vector. This means that with only a few exceptions
// all \ref vector_operations and \ref arithmetic_operations can be used. For instance, the
// current number of elements can be obtained via the \c size() function, the current capacity
// via the \c capacity() function, and the number of non-zero elements via the \c nonZeros()
// function. However, since rows are references to specific rows of a matrix, several operations
// are not possible on views, such as resizing and swapping. The following example shows this by
// means of a dense row view:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd row of matrix A
auto row2 = row( A, 2UL );
row2.size(); // Returns the number of elements in the row
row2.capacity(); // Returns the capacity of the row
row2.nonZeros(); // Returns the number of non-zero elements contained in the row
row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix
auto row3 = row( A, 3UL );
swap( row2, row3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_rows_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse rows can be used in all arithmetic operations that any other dense or
// sparse row vector can be used in. The following example gives an impression of the use of
// dense rows within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse rows with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::rowVector> c( 2UL );
c[1] = 3.0;
blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix
auto row0( row( A, 0UL ) ); // Reference to the 0th row of A
row0[0] = 0.0; // Manual initialization of the 0th row of A
row0[1] = 0.0;
row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A
row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A
row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A
b = row0 + a; // Dense vector/dense vector addition
b = c + row( A, 1UL ); // Sparse vector/dense vector addition
b = row0 * row( A, 2UL ); // Component-wise vector multiplication
row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row
b = row( A, 1UL ) * 2.0; // Scaling of the 1st row
b = 2.0 * row( A, 1UL ); // Scaling of the 1st row
row( A, 2UL ) += a; // Addition assignment
row( A, 2UL ) -= c; // Subtraction assignment
row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment
double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors
A = trans( c ) * row( A, 1UL ); // Outer product between two vectors
\endcode
// \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that row views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st row of a column-major matrix A
auto row1 = row( A, 1UL );
for( auto it=row1.begin(); it!=row1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a row view on a matrix stored in a column-major fashion
// can result in a considerable performance decrease in comparison to a row view on a matrix
// with row-major storage format. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two column-major matrices
blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th row of the multiplication between A and B ...
blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// the 15th row of the column-major matrix A with B.
blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B;
\endcode
// Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as
// possible, using a row-major storage order for matrix \c A would result in a more efficient
// evaluation.
//
// \n Previous: \ref views_submatrices Next: \ref views_row_selections
*/
//*************************************************************************************************
//**Row Selections*********************************************************************************
/*!\page views_row_selections Row Selections
//
// \tableofcontents
//
//
// Row selections provide views on arbitrary compositions of rows of dense and sparse matrices.
// These views act as a reference to the selected rows and represent them as another dense or
// sparse matrix. This reference is valid and can be used in every way any other dense or sparse
// matrix can be used as long as the matrix containing the rows is not resized or entirely
// destroyed. The row selection also acts as an alias to the matrix elements in the specified
// range: Changes made to the rows (e.g. modifying values, inserting or erasing elements) are
// immediately visible in the matrix and changes made via the matrix are immediately visible
// in the rows.
//
//
// \n \section views_row_selections_setup Setup of Row Selections
//
// A row selection can be created very conveniently via the \c rows() function. It can be included
// via the header file
\code
#include <blaze/math/Rows.h>
\endcode
// The indices of the rows to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Selecting the rows 4, 6, 8, and 10 (compile time arguments)
auto rs1 = rows<4UL,6UL,8UL,10UL>( A );
// Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto rs2 = rows( A, { 3UL, 2UL, 1UL } );
auto rs3 = rows( A, list );
// Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto rs4 = rows( A, array );
auto rs5 = rows( A, array.data(), array.size() );
// Selecting the row 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto rs6 = rows( A, vector );
auto rs7 = rows( A, vector.data(), vector.size() );
\endcode
// Note that it is possible to alias the rows of the underlying matrix in any order. Also note
// that it is possible to use the same index multiple times. The \c rows() function returns an
// expression representing the view on the selected rows. The type of this expression depends
// on the given arguments, primarily the type of the matrix and the compile time arguments. If
// the type is required, it can be determined via \c decltype or via the \c RowsExprTrait class
// template:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using RowsType1 = decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
using RowsType2 = blaze::RowsExprTrait<MatrixType,3UL,0UL,4UL,8UL>::Type;
\endcode
// The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a
// row selection will always be treated as a row-major matrix, regardless of the storage order of
// the matrix containing the rows. The view can also be used on both sides of an assignment: It
// can either be used as an alias to grant write access to specific rows of a matrix primitive
// on the left-hand side of an assignment or to grant read-access to specific rows of a matrix
// primitive or expression on the right-hand side of an assignment. The following example
// demonstrates this in detail:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
blaze::DynamicMatrix<double,blaze::columnMajor> B;
blaze::CompressedMatrix<double,blaze::rowMajor> C;
// ... Resizing and initialization
// Selecting the rows 1, 3, 5, and 7 of A
auto rs = rows( A, { 1UL, 3UL, 5UL, 7UL } );
// Setting rows 1, 3, 5, and 7 of A to row 4 of B
rs = rows( B, { 4UL, 4UL, 4UL, 4UL } );
// Setting the rows 2, 4, 6, and 8 of A to C
rows( A, { 2UL, 4UL, 6UL, 8UL } ) = C;
// Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C
submatrix( A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } );
// Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C
B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } );
\endcode
// \n \section views_row_selections_element_access Element Access
//
// The elements of a row selection can be directly accessed via the function call operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a view on the first four rows of A in reverse order
auto rs = rows( A, { 3UL, 2UL, 1UL, 0UL } );
// Setting the element (0,0) of the row selection, which corresponds
// to the element at position (3,0) in matrix A
rs(0,0) = 2.0;
\endcode
// Alternatively, the elements of a row selection can be traversed via (const) iterators. Just as
// with matrices, in case of non-const row selection, \c begin() and \c end() return an iterator,
// which allows to manipulate the elements, in case of constant row selection an iterator to
// immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a selection of rows of matrix A
auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=rs.begin(0); it!=rs.end(0); ++it ) {
*it = ...; // OK: Write access to the dense value.
... = *it; // OK: Read access to the dense value.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a selection of rows of matrix A
auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=rs.begin(0); it!=rs.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_row_selections_element_insertion Element Insertion
//
// Inserting/accessing elements in a sparse row selection can be done by several alternative
// functions. The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512
auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, 30, and 40 of A
// The function call operator provides access to all possible elements of the sparse row
// selection, including the zero elements. In case the function call operator is used to
// access an element that is currently not stored in the sparse row selection, the element
// is inserted into the row selection.
rs(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the row selection it is inserted into the row selection, if it is already
// contained in the row selection its value is modified.
rs.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the row selection is the insert() function.
// However, it inserts the element only in case the element is not already contained in the
// row selection.
rs.insert( 2UL, 6UL, 3.7 );
// Just as in the case of sparse matrices, elements can also be inserted via the append()
// function. In case of row selections, append() also requires that the appended element's
// index is strictly larger than the currently largest non-zero index in the according row
// of the row selection and that the according row's capacity is large enough to hold the new
// element. Note however that due to the nature of a row selection, which may be an alias to
// an arbitrary collection of rows, the append() function does not work as efficiently for
// a row selection as it does for a matrix.
rs.reserve( 2UL, 10UL );
rs.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_row_selections_common_operations Common Operations
//
// A view on specific rows of a matrix can be used like any other dense or sparse matrix. For
// instance, the current size of the matrix, i.e. the number of rows or columns can be obtained
// via the \c rows() and \c columns() functions, the current total capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// row selections are views on specific rows of a matrix, several operations are not possible,
// such as resizing and swapping:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a view on the rows 8, 16, 24, and 32 of matrix A
auto rs = rows( A, { 8UL, 16UL, 24UL, 32UL } );
rs.rows(); // Returns the number of rows of the row selection
rs.columns(); // Returns the number of columns of the row selection
rs.capacity(); // Returns the capacity of the row selection
rs.nonZeros(); // Returns the number of non-zero elements contained in the row selection
rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection
auto rs2 = rows( A, { 9UL, 17UL, 25UL, 33UL } );
swap( rs, rs2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_row_selections_arithmetic_operations Arithmetic Operations
//
// Both dense and sparse row selections can be used in all arithmetic operations that any other
// dense or sparse matrix can be used in. The following example gives an impression of the use
// of dense row selections within arithmetic operations. All operations (addition, subtraction,
// multiplication, scaling, ...) can be performed on all possible combinations of dense and
// sparse matrices with fitting element types:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3;
blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2;
blaze::CompressedVector<double,blaze::columnVector> a, b;
// ... Resizing and initialization
std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };
auto rs = rows( D1, indices1 ); // Selecting every third row of D1 in the range [0..21]
rs = D2; // Dense matrix assignment to the selected rows
rows( D1, indices2 ) = S1; // Sparse matrix assignment to the selected rows
D3 = rs + D2; // Dense matrix/dense matrix addition
S2 = S1 - rows( D1, indices2 ); // Sparse matrix/dense matrix subtraction
D2 = rs % rows( D1, indices3 ); // Dense matrix/dense matrix Schur product
D2 = rows( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication
rows( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of rows
D2 = rows( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of rows
D2 = 2.0 * rows( D1, indices3 ); // Scaling of the elements in the third selection of rows
rows( D1, indices1 ) += D2; // Addition assignment
rows( D1, indices2 ) -= S1; // Subtraction assignment
rows( D1, indices3 ) %= rs; // Schur product assignment
a = rows( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_row_selections_on_column_major_matrix Row Selections on Column-Major Matrices
//
// Especially noteworthy is that row selections can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st and 3rd row of a column-major matrix A
auto rs = rows( A, { 1UL, 3UL } );
// Traversing row 0 of the selection, which corresponds to the 1st row of matrix A
for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) {
// ...
}
\endcode
// However, please note that creating a row selection on a matrix stored in a column-major fashion
// can result in a considerable performance decrease in comparison to a row selection on a matrix
// with row-major storage format. This is due to the non-contiguous storage of the matrix elements.
// Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two column-major matrices
blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th, 30th, and 45th row of the multiplication between A and B ...
blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A * B, { 15UL, 30UL, 45UL } );
// ... is essentially the same as the following computation, which multiplies
// the 15th, 30th, and 45th row of the column-major matrix A with B.
blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B;
\endcode
// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible,
// using a row-major storage order for matrix \c A would result in a more efficient evaluation.
//
// \n Previous: \ref views_rows Next: \ref views_columns
*/
//*************************************************************************************************
//**Columns****************************************************************************************
/*!\page views_columns Columns
//
// \tableofcontents
//
//
// Just as rows provide a view on a specific row of a matrix, columns provide views on a specific
// column of a dense or sparse matrix. As such, columns act as a reference to a specific column.
// This reference is valid and can be used in every way any other column vector can be used as long
// as the matrix containing the column is not resized or entirely destroyed. Changes made to the
// elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the
// matrix and changes made via the matrix are immediately visible in the column.
//
//
// \n \section views_colums_setup Setup of Columns
// <hr>
//
// \image html column.png
// \image latex column.eps "Column view" width=250pt
//
// A reference to a dense or sparse column can be created very conveniently via the \c column()
// function. It can be included via the header file
\code
#include <blaze/math/Column.h>
\endcode
// The column index must be in the range from \f$[0..N-1]\f$, where \c N is the total number of
// columns of the matrix, and can be specified both at compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Creating a reference to the 1st column of matrix A (compile time index)
auto col1 = column<1UL>( A );
// Creating a reference to the 2nd column of matrix A (runtime index)
auto col2 = column( A, 2UL );
\endcode
// The \c column() function returns an expression representing the column view. The type of this
// expression depends on the given column arguments, primarily the type of the matrix and the
// compile time arguments. If the type is required, it can be determined via \c decltype or via
// the \c ColumnExprTrait class template:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using ColumnType1 = decltype( blaze::column<1UL>( std::declval<MatrixType>() ) );
using ColumnType2 = blaze::ColumnExprTrait<MatrixType,1UL>::Type;
\endcode
// The resulting view can be treated as any other column vector, i.e. it can be assigned to, it
// can be copied from, and it can be used in arithmetic operations. The reference can also be used
// on both sides of an assignment: The column can either be used as an alias to grant write access
// to a specific column of a matrix primitive on the left-hand side of an assignment or to grant
// read-access to a specific column of a matrix primitive or expression on the right-hand side
// of an assignment. The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::columnVector> x;
blaze::CompressedVector<double,blaze::columnVector> y;
blaze::DynamicMatrix<double,blaze::columnMajor> A, B;
blaze::CompressedMatrix<double,blaze::columnMajor> C, D;
// ... Resizing and initialization
// Setting the 1st column of matrix A to x
auto col1 = column( A, 1UL );
col1 = x;
// Setting the 4th column of matrix B to y
column( B, 4UL ) = y;
// Setting x to the 2nd column of the result of the matrix multiplication
x = column( A * B, 2UL );
// Setting y to the 2nd column of the result of the sparse matrix multiplication
y = column( C * D, 2UL );
\endcode
// \n \section views_columns_element_access Element Access
// <hr>
//
// The elements of a column can be directly accessed with the subscript operator.
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Creating a view on the 4th column of matrix A
auto col4 = column( A, 4UL );
// Setting the 1st element of the dense column, which corresponds
// to the 1st element in the 4th column of matrix A
col4[1] = 2.0;
\endcode
// The numbering of the column elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of rows of the referenced matrix. Alternatively, the elements of a column
// can be traversed via iterators. Just as with vectors, in case of non-const columns, \c begin()
// and \c end() return an iterator, which allows to manipulate the elements, in case of constant
// columns an iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
auto col31 = column( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=col31.begin(); it!=col31.end(); ++it ) {
*it = ...; // OK: Write access to the dense column value.
... = *it; // OK: Read access to the dense column value.
}
// Traversing the elements via iterators to const elements
for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense column value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
auto col31 = column( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=col31.begin(); it!=col31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_columns_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse column can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); // Non-initialized 100x10 matrix
auto col0( column( A, 0UL ) ); // Reference to the 0th column of A
// The subscript operator provides access to all possible elements of the sparse column,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse column, the element is inserted into the column.
col0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the column it is inserted into the column, if it is already contained
// in the column its value is modified.
col0.set( 45UL, -1.2 );
// An alternative for inserting elements into the column is the insert() function. However,
// it inserts the element only in case the element is not already contained in the column.
col0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse column is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the column and that the column's capacity is
// large enough to hold the new element.
col0.reserve( 10UL );
col0.append( 51UL, -2.1 );
\endcode
// \n \section views_columns_common_operations Common Operations
// <hr>
//
// A column view can be used like any other column vector. This means that with only a few
// exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance,
// the current number of elements can be obtained via the \c size() function, the current capacity
// via the \c capacity() function, and the number of non-zero elements via the \c nonZeros()
// function. However, since columns are references to specific columns of a matrix, several
// operations are not possible on views, such as resizing and swapping. The following example
// shows this by means of a dense column view:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd column of matrix A
auto col2 = column( A, 2UL );
col2.size(); // Returns the number of elements in the column
col2.capacity(); // Returns the capacity of the column
col2.nonZeros(); // Returns the number of non-zero elements contained in the column
col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix
auto col3 = column( A, 3UL );
swap( col2, col3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_columns_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse columns can be used in all arithmetic operations that any other dense or
// sparse column vector can be used in. The following example gives an impression of the use of
// dense columns within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse columns with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::columnVector> c( 2UL );
c[1] = 3.0;
blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // Non-initialized 2x4 matrix
auto col0( column( A, 0UL ) ); // Reference to the 0th column of A
col0[0] = 0.0; // Manual initialization of the 0th column of A
col0[1] = 0.0;
column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A
column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A
column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A
b = col0 + a; // Dense vector/dense vector addition
b = c + column( A, 1UL ); // Sparse vector/dense vector addition
b = col0 * column( A, 2UL ); // Component-wise vector multiplication
column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column
b = column( A, 1UL ) * 2.0; // Scaling of the 1st column
b = 2.0 * column( A, 1UL ); // Scaling of the 1st column
column( A, 2UL ) += a; // Addition assignment
column( A, 2UL ) -= c; // Subtraction assignment
column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment
double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors
A = column( A, 1UL ) * trans( c ); // Outer product between two vectors
\endcode
// \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that column views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st column of a row-major matrix A
auto col1 = column( A, 1UL );
for( auto it=col1.begin(); it!=col1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a column view on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a column view on a matrix
// with column-major storage format. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two row-major matrices
blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th column of the multiplication between A and B ...
blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// A with the 15th column of the row-major matrix B.
blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL );
\endcode
// Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible,
// using a column-major storage order for matrix \c B would result in a more efficient evaluation.
//
// \n Previous: \ref views_row_selections Next: \ref views_column_selections
*/
//*************************************************************************************************
//**Column Selections******************************************************************************
/*!\page views_column_selections Column Selections
//
// \tableofcontents
//
//
// Column selections provide views on arbitrary compositions of columns of dense and sparse
// matrices. These views act as a reference to the selected columns and represent them as another
// dense or sparse matrix. This reference is valid and can be used in every way any other dense
// or sparse matrix can be used as long as the matrix containing the columns is not resized or
// entirely destroyed. The column selection also acts as an alias to the matrix elements in the
// specified range: Changes made to the columns (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the matrix and changes made via the matrix are immediately
// visible in the columns.
//
//
// \n \section views_column_selections_setup Setup of Column Selections
//
// A column selection can be created very conveniently via the \c columns() function. It can be
// included via the header file
\code
#include <blaze/math/Columns.h>
\endcode
// The indices of the columns to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Selecting the columns 4, 6, 8, and 10 (compile time arguments)
auto cs1 = columns<4UL,6UL,8UL,10UL>( A );
// Selecting the columns 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto cs2 = columns( A, { 3UL, 2UL, 1UL } );
auto cs3 = columns( A, list );
// Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto cs4 = columns( A, array );
auto cs5 = columns( A, array.data(), array.size() );
// Selecting the column 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto cs6 = columns( A, vector );
auto cs7 = columns( A, vector.data(), vector.size() );
\endcode
// Note that it is possible to alias the columns of the underlying matrix in any order. Also note
// that it is possible to use the same index multiple times. The \c columns() function returns an
// expression representing the view on the selected columns. The type of this expression depends
// on the given arguments, primarily the type of the matrix and the compile time arguments. If
// the type is required, it can be determined via \c decltype or via the \c ColumnsExprTrait
// class template:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using ColumnsType1 = decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
using ColumnsType2 = blaze::ColumnsExprTrait<MatrixType,3UL,0UL,4UL,8UL>::Type;
\endcode
// The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a
// column selection will always be treated as a column-major matrix, regardless of the storage
// order of the matrix containing the columns. The view can also be used on both sides of an
// assignment: It can either be used as an alias to grant write access to specific columns of a
// matrix primitive on the left-hand side of an assignment or to grant read-access to specific
// columns of a matrix primitive or expression on the right-hand side of an assignment. The
// following example demonstrates this in detail:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
blaze::DynamicMatrix<double,blaze::rowMajor> B;
blaze::CompressedMatrix<double,blaze::columnMajor> C;
// ... Resizing and initialization
// Selecting the columns 1, 3, 5, and 7 of A
auto cs = columns( A, { 1UL, 3UL, 5UL, 7UL } );
// Setting columns 1, 3, 5, and 7 of A to column 4 of B
cs = columns( B, { 4UL, 4UL, 4UL, 4UL } );
// Setting the columns 2, 4, 6, and 8 of A to C
columns( A, { 2UL, 4UL, 6UL, 8UL } ) = C;
// Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C
submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL } );
// Rotating the result of the addition between columns 1, 3, 5, and 7 of A and C
B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } );
\endcode
// \n \section views_column_selections_element_access Element Access
//
// The elements of a column selection can be directly accessed via the function call operator:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Creating a view on the first four columns of A in reverse order
auto cs = columns( A, { 3UL, 2UL, 1UL, 0UL } );
// Setting the element (0,0) of the column selection, which corresponds
// to the element at position (0,3) in matrix A
cs(0,0) = 2.0;
\endcode
// Alternatively, the elements of a column selection can be traversed via (const) iterators.
// Just as with matrices, in case of non-const column selection, \c begin() and \c end() return
// an iterator, which allows to manipulate the elements, in case of constant column selection an
// iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL );
// ... Resizing and initialization
// Creating a reference to a selection of columns of matrix A
auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th column via iterators to non-const elements
for( auto it=cs.begin(0); it!=cs.end(0); ++it ) {
*it = ...; // OK: Write access to the dense value.
... = *it; // OK: Read access to the dense value.
}
// Traversing the elements of the 1st column via iterators to const elements
for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL );
// ... Resizing and initialization
// Creating a reference to a selection of columns of matrix A
auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th column via iterators to non-const elements
for( auto it=cs.begin(0); it!=cs.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st column via iterators to const elements
for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_column_selections_element_insertion Element Insertion
//
// Inserting/accessing elements in a sparse column selection can be done by several alternative
// functions. The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); // Non-initialized matrix of size 512x256
auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns 10, 20, 30, and 40 of A
// The function call operator provides access to all possible elements of the sparse column
// selection, including the zero elements. In case the function call operator is used to
// access an element that is currently not stored in the sparse column selection, the element
// is inserted into the column selection.
cs(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the column selection it is inserted into the column selection, if it is
// already contained in the column selection its value is modified.
cs.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the column selection is the insert() function.
// However, it inserts the element only in case the element is not already contained in the
// column selection.
cs.insert( 2UL, 6UL, 3.7 );
// Just as in the case of sparse matrices, elements can also be inserted via the append()
// function. In case of column selections, append() also requires that the appended element's
// index is strictly larger than the currently largest non-zero index in the according column
// of the column selection and that the according column's capacity is large enough to hold the
// new element. Note however that due to the nature of a column selection, which may be an alias
// to an arbitrary collection of columns, the append() function does not work as efficiently
// for a column selection as it does for a matrix.
cs.reserve( 2UL, 10UL );
cs.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_column_selections_common_operations Common Operations
//
// A view on specific columns of a matrix can be used like any other dense or sparse matrix. For
// instance, the current size of the matrix, i.e. the number of rows or columns can be obtained
// via the \c rows() and \c columns() functions, the current total capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// column selections are views on specific columns of a matrix, several operations are not possible,
// such as resizing and swapping:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a view on the columns 8, 16, 24, and 32 of matrix A
auto cs = columns( A, { 8UL, 16UL, 24UL, 32UL } );
cs.rows(); // Returns the number of rows of the column selection
cs.columns(); // Returns the number of columns of the column selection
cs.capacity(); // Returns the capacity of the column selection
cs.nonZeros(); // Returns the number of non-zero elements contained in the column selection
cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column selection
auto cs2 = columns( A, { 9UL, 17UL, 25UL, 33UL } );
swap( cs, cs2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_column_selections_arithmetic_operations Arithmetic Operations
//
// Both dense and sparse column selections can be used in all arithmetic operations that any other
// dense or sparse matrix can be used in. The following example gives an impression of the use of
// dense column selections within arithmetic operations. All operations (addition, subtraction,
// multiplication, scaling, ...) can be performed on all possible combinations of dense and
// sparse matrices with fitting element types:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3;
blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2;
blaze::CompressedVector<double,blaze::columnVector> a, b;
// ... Resizing and initialization
std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };
auto cs = columns( D1, indices1 ); // Selecting every third column of D1 in the range [0..21]
cs = D2; // Dense matrix assignment to the selected columns
columns( D1, indices2 ) = S1; // Sparse matrix assignment to the selected columns
D3 = cs + D2; // Dense matrix/dense matrix addition
S2 = S1 - columns( D1, indices2 ); // Sparse matrix/dense matrix subtraction
D2 = cs % columns( D1, indices3 ); // Dense matrix/dense matrix Schur product
D2 = columns( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication
columns( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of columns
D2 = columns( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of columns
D2 = 2.0 * columns( D1, indices3 ); // Scaling of the elements in the third selection of columns
columns( D1, indices1 ) += D2; // Addition assignment
columns( D1, indices2 ) -= S1; // Subtraction assignment
columns( D1, indices3 ) %= cs; // Schur product assignment
a = columns( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_column_selections_on_row_major_matrix Column Selections on a Row-Major Matrix
//
// Especially noteworthy is that column selections can be created for both row-major and
// column-major matrices. Whereas the interface of a row-major matrix only allows to traverse a
// row directly and the interface of a column-major matrix only allows to traverse a column, via
// views it is possible to traverse a row of a column-major matrix or a column of a row-major
// matrix. For instance:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st and 3rd column of a row-major matrix A
auto cs = columns( A, { 1UL, 3UL } );
// Traversing column 0 of the selection, which corresponds to the 1st column of matrix A
for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) {
// ...
}
\endcode
// However, please note that creating a column selection on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a column selection on a
// matrix with column-major storage format. This is due to the non-contiguous storage of the
// matrix elements. Therefore care has to be taken in the choice of the most suitable storage
// order:
\code
// Setup of two row-major matrices
blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th, 30th, and 45th column of the multiplication between A and B ...
blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } );
// ... is essentially the same as the following computation, which multiplies
// A with the 15th, 30th, and 45th columns of the row-major matrix B.
blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( B, { 15UL, 30UL, 45UL } );
\endcode
// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible,
// using a column-major storage order for matrix \c A would result in a more efficient evaluation.
//
// \n Previous: \ref views_columns Next: \ref views_bands
*/
//*************************************************************************************************
//**Bands******************************************************************************************
/*!\page views_bands Bands
//
// \tableofcontents
//
//
// Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the
// subdiagonal, ...). As such, bands act as a reference to a specific band. This reference
// is valid and can be used in every way any other vector can be used as long as the matrix
// containing the band is not resized or entirely destroyed. The band also acts as an alias to
// the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the matrix and changes made via the matrix are immediately
// visible in the band.
//
//
// \n \section views_bands_setup Setup of Bands
// <hr>
//
// \image html band.png
// \image latex band.eps "Band view" width=250pt
//
// A reference to a dense or sparse band can be created very conveniently via the \c band()
// function. It can be included via the header file
\code
#include <blaze/math/Band.h>
\endcode
// The band index must be in the range from \f$[1-M..N-1]\f$, where \c M is the total number of
// rows and \c N is the total number of columns, and can be specified either at compile time or at
// runtime:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a reference to the 1st lower band of matrix A (compile time index)
auto band1 = band<-1L>( A );
// Creating a reference to the 2nd upper band of matrix A (runtime index)
auto band2 = band( A, 2L );
\endcode
// In addition, the \c diagonal() function provides a convenient shortcut for the setup of a view
// on the diagonal of a dense or sparse matrix. It has the same effect as calling the \c band()
// function with a compile time index of 0:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a reference to the diagonal of matrix A via the band() and diagonal() functions
auto diag1 = band<0L>( A );
auto diag2 = diagonal( A );
static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, "Non-identical types detected" );
\endcode
// Both the \c band() and the \c diagonal() function return an expression representing the band
// view. The type of this expression depends on the given arguments, primarily the type of the
// matrix and the compile time arguments. If the type is required, it can be determined via
// \c decltype or via the \c BandExprTrait and \c DiagonalExprTrait class templates, respectively:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using BandType1 = decltype( blaze::band<1L>( std::declval<MatrixType>() ) );
using BandType2 = blaze::BandExprTrait<MatrixType,1L>::Type;
using DiagonalType1 = decltype( blaze::diagonal( std::declval<MatrixType>() ) );
using DiagonalType2 = blaze::DiagonalExprTrait<MatrixType>::Type;
\endcode
// This resulting view can be treated as any other vector, i.e. it can be assigned to, it can
// be copied from, and it can be used in arithmetic operations. By default, bands are considered
// column vectors, but this setting can be changed via the \c defaultTransposeFlag switch. The
// reference can also be used on both sides of an assignment: The band can either be used as an
// alias to grant write access to a specific band of a matrix primitive on the left-hand side of
// an assignment or to grant read-access to a specific band of a matrix primitive or expression
// on the right-hand side of an assignment. The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A, B;
blaze::CompressedMatrix<double,blaze::rowMajor> C, D;
// ... Resizing and initialization
// Setting the 2nd upper band of matrix A to x
auto band2 = band( A, 2L );
band2 = x;
// Setting the 3rd upper band of matrix B to y
band( B, 3L ) = y;
// Setting x to the 2nd lower band of the result of the matrix multiplication
x = band( A * B, -2L );
// Setting y to the 2nd upper band of the result of the sparse matrix multiplication
y = band( C * D, 2L );
\endcode
// \n \section views_bands_element_access Element Access
// <hr>
//
// The elements of a band can be directly accessed with the subscript operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a view on the 4th upper band of matrix A
auto band4 = band( A, 4L );
// Setting the 1st element of the dense band, which corresponds
// to the 1st element in the 4th upper band of matrix A
band4[1] = 2.0;
\endcode
// The numbering of the band elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of elements of the referenced band. Alternatively, the elements of a band
// can be traversed via iterators. Just as with vectors, in case of a non-const band, \c begin() and
// \c end() return an iterator, which allows to manipulate the elements, in case of constant bands
// an iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 5th upper band of matrix A
auto band5 = band( A, 5L );
// Traversing the elements via iterators to non-const elements
for( auto it=band5.begin(); it!=band5.end(); ++it ) {
*it = ...; // OK; Write access to the dense band value
... = *it; // OK: Read access to the dense band value.
}
// Traversing the elements via iterators to const elements
for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense band value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 5th band of matrix A
auto band5 = band( A, 5L );
// Traversing the elements via iterators to non-const elements
for( auto it=band5.begin(); it!=band5.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_bands_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse band can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix
auto diag( band( A, 0L ) ); // Reference to the diagonal of A
// The subscript operator provides access to all possible elements of the sparse band,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse band, the element is inserted into the band.
diag[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the band it is inserted into the band, if it is already contained in
// the band its value is modified.
diag.set( 45UL, -1.2 );
// An alternative for inserting elements into the band is the insert() function. However,
// it inserts the element only in case the element is not already contained in the band.
diag.insert( 50UL, 3.7 );
\endcode
// \n \section views_bands_common_operations Common Operations
// <hr>
//
// A band view can be used like any other column vector. This means that with only a few
// exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance,
// the current number of band elements can be obtained via the \c size() function, the current
// capacity via the \c capacity() function, and the number of non-zero elements via the
// \c nonZeros() function. However, since bands are references to specific bands of a matrix,
// several operations are not possible, such as resizing and swapping. The following example
// shows this by means of a dense band view:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd upper band of matrix A
auto band2 = band( A, 2L );
band2.size(); // Returns the number of elements in the band
band2.capacity(); // Returns the capacity of the band
band2.nonZeros(); // Returns the number of non-zero elements contained in the band
band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a matrix
auto band3 = band( A, 3L );
swap( band2, band3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_bands_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse bands can be used in all arithmetic operations that any other dense or
// sparse vector can be used in. The following example gives an impression of the use of dense
// bands within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse bands with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::columnVector> c( 2UL );
c[1] = 3.0;
blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix
auto band1( band( A, 1L ) ); // Reference to the 1st upper band of A
auto diag ( band( A, 0L ) ); // Reference to the diagonal of A
band1[0] = 0.0; // Manual initialization of the 1st upper band of A
diag = 1.0; // Homogeneous initialization of the diagonal of A
band( A, -1L ) = a; // Dense vector initialization of the 1st lower band of A
band( A, -2L ) = c; // Sparse vector initialization of the 2nd lower band of A
b = diag + a; // Dense vector/dense vector addition
b = c + band( A, -1L ); // Sparse vector/dense vector addition
b = diag * band( A, -2L ); // Component-wise vector multiplication
band( A, -1L ) *= 2.0; // In-place scaling of the 1st lower band
b = band( A, -1L ) * 2.0; // Scaling of the 1st lower band
b = 2.0 * band( A, -1L ); // Scaling of the 1st lower band
band( A, -2L ) += a; // Addition assignment
band( A, -2L ) -= c; // Subtraction assignment
band( A, -2L ) *= band( A, 0L ); // Multiplication assignment
double scalar = trans( c ) * band( A, -1L ); // Scalar/dot/inner product between two vectors
A = band( A, -1L ) * trans( c ); // Outer product between two vectors
\endcode
// \n Previous: \ref views_column_selections Next: \ref arithmetic_operations
*/
//*************************************************************************************************
//**Arithmetic Operations**************************************************************************
/*!\page arithmetic_operations Arithmetic Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following arithmetic operations for vectors and matrices:
//
// <ul>
// <li> \ref addition </li>
// <li> \ref subtraction </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// </ul>
// </li>
// <li> \ref vector_vector_division </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication </li>
// </ul>
//
// \n Previous: \ref views_bands Next: \ref addition
*/
//*************************************************************************************************
//**Addition***************************************************************************************
/*!\page addition Addition
//
// The addition of vectors and matrices is as intuitive as the addition of scalar values. For both
// the vector addition as well as the matrix addition the addition operator can be used. It even
// enables the addition of dense and sparse vectors as well as the addition of dense and sparse
// matrices:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
v3 = v1 + v2; // Addition of two column vectors of different data type
\endcode
\code
blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// add vectors with the same transpose flag:
\code
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 + v2; // Compilation error: Cannot add a column vector and a row vector
v1 + trans( v2 ); // OK: Addition of two column vectors
\endcode
// In case of matrices, however, it is possible to add row-major and column-major matrices. Note
// however that in favor of performance the addition of two matrices with the same storage order
// is favorable. The same argument holds for the element type: In case two vectors or matrices
// with the same element type are added, the performance can be much higher due to vectorization
// of the operation.
\code
blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 + v2; // Vectorized addition of two double precision vectors
\endcode
\code
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices
\endcode
// \n Previous: \ref arithmetic_operations Next: \ref subtraction
*/
//*************************************************************************************************
//**Subtraction************************************************************************************
/*!\page subtraction Subtraction
//
// The subtraction of vectors and matrices works exactly as intuitive as the addition, but with
// the subtraction operator. For both the vector subtraction as well as the matrix subtraction
// the subtraction operator can be used. It also enables the subtraction of dense and sparse
// vectors as well as the subtraction of dense and sparse matrices:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
v3 = v1 - v2; // Subtraction of two column vectors of different data type
blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// subtract vectors with the same transpose flag:
\code
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector
v1 - trans( v2 ); // OK: Subtraction of two column vectors
\endcode
// In case of matrices, however, it is possible to subtract row-major and column-major matrices.
// Note however that in favor of performance the subtraction of two matrices with the same storage
// order is favorable. The same argument holds for the element type: In case two vectors or matrices
// with the same element type are subtracted, the performance can be much higher due to vectorization
// of the operation.
\code
blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 - v2; // Vectorized subtraction of two double precision vectors
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices
\endcode
// \n Previous: \ref addition Next: \ref scalar_multiplication
*/
//*************************************************************************************************
//**Scalar Multiplication**************************************************************************
/*!\page scalar_multiplication Scalar Multiplication
//
// The scalar multiplication is the multiplication of a scalar value with a vector or a matrix.
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Additionally, it is possible to use std::complex values with the same built-in data
// types as element type.
\code
blaze::StaticVector<int,3UL> v1{ 1, 2, 3 };
blaze::DynamicVector<double> v2 = v1 * 1.2;
blaze::CompressedVector<float> v3 = -0.3F * v1;
\endcode
\code
blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
blaze::DynamicMatrix<double> M2 = M1 * 1.2;
blaze::CompressedMatrix<float> M3 = -0.3F * M1;
\endcode
// Vectors and matrices cannot be used as scalar values for scalar multiplications (see the
// following example). However, each vector and matrix provides the \c scale() function, which
// can be used to scale a vector or matrix element-wise with arbitrary scalar data types:
\code
blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1;
blaze::StaticMatrix<int,3UL,3UL> scalar;
M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication
M1.scale( scalar ); // Scalar multiplication
\endcode
// \n Previous: \ref subtraction Next: \ref componentwise_multiplication
*/
//*************************************************************************************************
//**Vector/Vector Multiplication*******************************************************************
/*!\page vector_vector_multiplication Vector/Vector Multiplication
//
// \n \section componentwise_multiplication Componentwise Multiplication
// <hr>
//
// Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or
// blaze::rowVector) via the multiplication operator results in a componentwise multiplication
// of the two vectors:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector> v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector> v4( 10UL );
// ... Initialization of the vectors
CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and
// a dense column vector. The result is a sparse
// column vector.
DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row
// vectors. The result is a dense row vector.
\endcode
// \n \section inner_product Inner Product / Scalar Product / Dot Product
// <hr>
//
// The multiplication between a row vector and a column vector results in an inner product between
// the two vectors:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };
int result = v1 * v2; // Results in the value 15
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
int result = v1 * trans( v2 ); // Also results in the value 15
\endcode
// Alternatively, either the \c inner() function, the \c dot() function or the comma operator can
// be used for any combination of vectors (row or column vectors) to perform an inner product:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
// All alternatives for the inner product between a column vector and a row vector
int result1 = trans( v1 ) * trans( v2 );
int result2 = inner( v1, v2 );
int result3 = dot( v1, v2 );
int result4 = (v1,v2);
\endcode
// When using the comma operator, please note the brackets embracing the inner product expression.
// Due to the low precedence of the comma operator (lower even than the assignment operator) these
// brackets are strictly required for a correct evaluation of the inner product.
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 };
StaticMatrix<int,3UL,3UL> M1 = v1 * v2;
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
blaze::StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2;
\endcode
// Alternatively, the \c outer() function can be used for any combination of vectors (row or column
// vectors) to perform an outer product:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 ); // Outer product between two row vectors
\endcode
// \n \section cross_product Cross Product
// <hr>
//
// Two vectors with the same transpose flag can be multiplied via the cross product. The cross
// product between two vectors \f$ a \f$ and \f$ b \f$ is defined as
\f[
\left(\begin{array}{*{1}{c}}
c_0 \\
c_1 \\
c_2 \\
\end{array}\right)
=
\left(\begin{array}{*{1}{c}}
a_1 b_2 - a_2 b_1 \\
a_2 b_0 - a_0 b_2 \\
a_0 b_1 - a_1 b_0 \\
\end{array}\right).
\f]
// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%)
// can be used in case infix notation is required:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };
blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) );
blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 );
\endcode
// Please note that the cross product is restricted to three dimensional (dense and sparse)
// column vectors.
//
// \n Previous: \ref scalar_multiplication Next: \ref vector_vector_division
*/
//*************************************************************************************************
//**Vector/Vector Division*************************************************************************
/*!\page vector_vector_division Vector/Vector Division
//
// \n \section componentwise_division Componentwise Division
// <hr>
//
// Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector
// or blaze::rowVector) via the division operator results in a componentwise division:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector> v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector> v4( 10UL );
// ... Initialization of the vectors
CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a
// dense column vector. The result is a sparse
// column vector.
DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row
// vectors. The result is a dense row vector.
\endcode
// Note that all values of the divisor must be non-zero and that no checks are performed to assert
// this precondition!
//
// \n Previous: \ref vector_vector_multiplication Next: \ref matrix_vector_multiplication
*/
//*************************************************************************************************
//**Matrix/Vector Multiplication*******************************************************************
/*!\page matrix_vector_multiplication Matrix/Vector Multiplication
//
// In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical
// textbooks. Just as in textbooks there are two different multiplications between a matrix and
// a vector: a matrix/column vector multiplication and a row vector/matrix multiplication:
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::DynamicMatrix;
DynamicMatrix<int> M1( 39UL, 12UL );
StaticVector<int,12UL,columnVector> v1;
// ... Initialization of the matrix and the vector
DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication
DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication
\endcode
// Note that the storage order of the matrix poses no restrictions on the operation. Also note,
// that the highest performance for a multiplication between a dense matrix and a dense vector can
// be achieved if both the matrix and the vector have the same scalar element type.
//
// \n Previous: \ref vector_vector_division Next: \ref matrix_matrix_multiplication
*/
//*************************************************************************************************
//**Matrix/Matrix Multiplication*******************************************************************
/*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication
//
// \n \section schur_product Componentwise Multiplication / Schur Product
// <hr>
//
// Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns)
// via the modulo operator results in a componentwise multiplication (Schur product) of the two
// matrices:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 28UL, 35UL );
CompressedMatrix<float> M2( 28UL, 35UL );
// ... Initialization of the matrices
DynamicMatrix<double> M3 = M1 % M2;
\endcode
// \n \section matrix_product Matrix Product
// <hr>
//
// The matrix/matrix product can be formulated exactly as in mathematical textbooks:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 45UL, 85UL );
CompressedMatrix<float> M2( 85UL, 37UL );
// ... Initialization of the matrices
DynamicMatrix<double> M3 = M1 * M2;
\endcode
// The storage order of the two matrices poses no restrictions on the operation, all variations
// are possible. It is also possible to multiply two matrices with different element type, as
// long as the element types themselves can be multiplied and added. Note however that the
// highest performance for a multiplication between two matrices can be expected for two
// matrices with the same scalar element type.
//
// In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper
// triangular, or diagonal, the computation can be optimized by explicitly declaring the
// multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by
// means of the \ref matrix_operations_declaration_operations :
\code
using blaze::DynamicMatrix;
DynamicMatrix<double> M1, M2, M3;
// ... Initialization of the square matrices
M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric
M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian
M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular
M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular
M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal
\endcode
// Using a declaration operation on a multiplication expression can speed up the computation
// by a factor of 2. Note however that the caller of the according declaration operation takes
// full responsibility for the correctness of the declaration. Falsely declaring a multiplication
// as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined
// behavior!
//
// \n Previous: \ref matrix_vector_multiplication Next: \ref shared_memory_parallelization
*/
//*************************************************************************************************
//**Shared Memory Parallelization******************************************************************
/*!\page shared_memory_parallelization Shared Memory Parallelization
//
// For all possible operations \b Blaze tries to achieve maximum performance on a single CPU
// core. However, today's CPUs are not single core anymore, but provide several (homogeneous
// or heterogeneous) compute cores. In order to fully exploit the performance potential of a
// multicore CPU, computations have to be parallelized across all available cores of a CPU.
// For this purpose, \b Blaze provides four different shared memory parallelization techniques:
//
// - \ref openmp_parallelization
// - \ref cpp_threads_parallelization
// - \ref boost_threads_parallelization
// - \ref hpx_parallelization
//
// When any of the shared memory parallelization techniques is activated, all arithmetic
// operations on dense vectors and matrices (including additions, subtractions, multiplications,
// divisions, and all componentwise arithmetic operations) and most operations on sparse vectors
// and matrices are automatically run in parallel. However, in addition, \b Blaze provides means
// to enforce the serial execution of specific operations:
//
// - \ref serial_execution
//
// \n Previous: \ref matrix_matrix_multiplication Next: \ref openmp_parallelization
*/
//*************************************************************************************************
//**OpenMP Parallelization*************************************************************************
/*!\page openmp_parallelization OpenMP Parallelization
//
// \tableofcontents
//
//
// \n \section openmp_setup OpenMP Setup
// <hr>
//
// To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify
// the use of OpenMP on the command line:
\code
-fopenmp // GNU/Clang C++ compiler
-openmp // Intel C++ compiler
/openmp // Visual Studio
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of threads.
//
// As common for OpenMP, the number of threads can be specified either via an environment variable
\code
export OMP_NUM_THREADS=4 // Unix systems
set OMP_NUM_THREADS=4 // Windows systems
\endcode
// or via an explicit call to the \c omp_set_num_threads() function:
\code
omp_set_num_threads( 4 );
\endcode
// Alternatively, the number of threads can also be specified via the \c setNumThreads() function
// provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of OpenMP, the function returns the maximum number of threads OpenMP will use
// within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function.
//
//
// \n \section openmp_configuration OpenMP Configuration
// <hr>
//
// Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze
// deems the parallel execution as counterproductive for the overall performance, the operation
// is executed serially. One of the main reasons for not executing an operation in parallel is
// the size of the operands. For instance, a vector addition is only executed in parallel if the
// size of both vector operands exceeds a certain threshold. Otherwise, the performance could
// seriously decrease due to the overhead caused by the thread setup. However, in order to be
// able to adjust the \b Blaze library to a specific system, it is possible to configure these
// thresholds manually. All shared memory thresholds are contained within the configuration file
// <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique (see also \ref cpp_threads_parallelization and
// \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum
// performance for all possible situations and configurations. They merely provide a reasonable
// standard for the current CPU generation.
//
//
// \n \section openmp_first_touch First Touch Policy
// <hr>
//
// So far the \b Blaze library does not (yet) automatically initialize dynamic memory according
// to the first touch principle. Consider for instance the following vector triad example:
\code
using blaze::columnVector;
const size_t N( 1000000UL );
blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N );
// Initialization of the vectors b, c, and d
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// Performing a vector triad
a = b + c * d;
\endcode
// If this code, which is prototypical for many OpenMP applications that have not been optimized
// for ccNUMA architectures, is run across several locality domains (LD), it will not scale
// beyond the maximum performance achievable on a single LD if the working set does not fit into
// the cache. This is because the initialization loop is executed by a single thread, writing to
// \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will
// be mapped into a single LD.
//
// As mentioned above, this problem can be solved by performing vector initialization in parallel:
\code
// ...
// Initialization of the vectors b, c, and d
#pragma omp parallel for
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// ...
\endcode
// This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for
// instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in
// order to achieve the maximum possible performance, it is imperative to initialize the memory
// according to the later use of the data structures.
//
//
// \n \section openmp_limitations Limitations of the OpenMP Parallelization
// <hr>
//
// There are a few important limitations to the current \b Blaze OpenMP parallelization. The first
// one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the
// other one the OpenMP \c sections directive (see \ref openmp_sections).
//
//
// \n \subsection openmp_parallel The Parallel Directive
//
// In OpenMP threads are explicitly spawned via an OpenMP parallel directive:
\code
// Serial region, executed by a single thread
#pragma omp parallel
{
// Parallel region, executed by the specified number of threads
}
// Serial region, executed by a single thread
\endcode
// Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a
// parallel directive is encountered. Therefore, from a performance point of view, it seems to be
// beneficial to use a single OpenMP parallel directive for several operations:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
#pragma omp parallel
{
y1 = A * x;
y2 = B * x;
}
\endcode
// Unfortunately, this optimization approach is not allowed within the \b Blaze library. More
// explicitly, it is not allowed to put an operation into a parallel region. The reason is that
// the entire code contained within a parallel region is executed by all threads. Although this
// appears to just comprise the contained computations, a computation (or more specifically the
// assignment of an expression to a vector or matrix) can contain additional logic that must not
// be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.).
// Therefore it is not possible to manually start a parallel region for several operations, but
// \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand
// and the given operands.
//
// \n \subsection openmp_sections The Sections Directive
//
// OpenMP provides several work-sharing constructs to distribute work among threads. One of these
// constructs is the \c sections directive:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = A * x;
#pragma omp section
y2 = B * x;
}
\endcode
// In this example, two threads are used to compute two distinct matrix/vector multiplications
// concurrently. Thereby each of the \c sections is executed by exactly one thread.
//
// Unfortunately \b Blaze does not support concurrent parallel computations and therefore this
// approach does not work with any of the \b Blaze parallelization techniques. All techniques
// (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization
// and \ref boost_threads_parallelization) are optimized for the parallel computation of an
// operation within a single thread of execution. This means that \b Blaze tries to use all
// available threads to compute the result of a single operation as efficiently as possible.
// Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations
// and to let \b Blaze compute all operations within a \c sections directive in serial. This can
// be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution)
// or by selectively serializing all operations within a \c sections directive via the \c serial()
// function:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = serial( A * x );
#pragma omp section
y2 = serial( B * x );
}
\endcode
// Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does
// NOT work in this context!
//
// \n Previous: \ref shared_memory_parallelization Next: \ref cpp_threads_parallelization
*/
//*************************************************************************************************
//**C++11 Thread Parallelization*******************************************************************
/*!\page cpp_threads_parallelization C++11 Thread Parallelization
//
// \tableofcontents
//
//
// In addition to the OpenMP-based shared memory parallelization, starting with \b Blaze 2.1,
// \b Blaze also provides a shared memory parallelization based on C++11 threads.
//
//
// \n \section cpp_threads_setup C++11 Thread Setup
// <hr>
//
// In order to enable the C++11 thread-based parallelization, first the according C++11-specific
// compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument
// has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the
// compiler flags have to be extended by
\code
... -std=c++11 -DBLAZE_USE_CPP_THREADS ...
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of C++11 threads. Note that in case both OpenMP and C++11
// threads are enabled on the command line, the OpenMP-based parallelization has priority and
// is preferred.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of C++11 threads, the function will return the previously specified number of
// threads.
//
//
// \n \section cpp_threads_configuration C++11 Thread Configuration
// <hr>
//
// As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an
// operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for
// the overall performance, the operation is executed serially. One of the main reasons for not
// executing an operation in parallel is the size of the operands. For instance, a vector addition
// is only executed in parallel if the size of both vector operands exceeds a certain threshold.
// Otherwise, the performance could seriously decrease due to the overhead caused by the thread
// setup. However, in order to be able to adjust the \b Blaze library to a specific system, it
// is possible to configure these thresholds manually. All thresholds are contained within the
// configuration file <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the C++11 thread parallelization.
//
//
// \n \section cpp_threads_known_issues Known Issues
// <hr>
//
// There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang
// if their destructor is executed after the \c main() function:
//
// http://connect.microsoft.com/VisualStudio/feedback/details/747145
//
// Unfortunately, the C++11 parallelization of the \b Blaze library is affected by this bug.
// In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function,
// which can be used to manually destroy all threads at the end of the \c main() function:
\code
int main()
{
// ... Using the C++11 thread parallelization of Blaze
shutDownThreads();
}
\endcode
// Please note that this function may only be used at the end of the \c main() function. After
// this function no further computation may be executed! Also note that this function has an
// effect for Visual Studio compilers only and doesn't need to be used with any other compiler.
//
// \n Previous: \ref openmp_parallelization Next: \ref boost_threads_parallelization
*/
//*************************************************************************************************
//**Boost Thread Parallelization*******************************************************************
/*!\page boost_threads_parallelization Boost Thread Parallelization
//
// \tableofcontents
//
//
// The third available shared memory parallelization provided with \b Blaze is based on Boost
// threads.
//
//
// \n \section boost_threads_setup Boost Thread Setup
// <hr>
//
// In order to enable the Boost thread-based parallelization, two steps have to be taken: First,
// the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during
// compilation:
\code
... -DBLAZE_USE_BOOST_THREADS ...
\endcode
// Second, the according Boost libraries have to be linked. These two simple actions will cause
// the \b Blaze library to automatically try to run all operations in parallel with the specified
// number of Boost threads. Note that the OpenMP-based and C++11 thread-based parallelizations
// have priority, i.e. are preferred in case either is enabled in combination with the Boost
// thread parallelization.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of Boost threads, the function will return the previously specified number of
// threads.
//
//
// \n \section boost_threads_configuration Boost Thread Configuration
// <hr>
//
// As in case of the other shared memory parallelizations \b Blaze is not unconditionally running
// an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization).
// All thresholds related to the Boost thread parallelization are also contained within the
// configuration file <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the Boost thread parallelization.
//
// \n Previous: \ref cpp_threads_parallelization Next: \ref hpx_parallelization
*/
//*************************************************************************************************
//**HPX Parallelization****************************************************************************
/*!\page hpx_parallelization HPX Parallelization
//
// \tableofcontents
//
//
// The fourth and final shared memory parallelization provided with \b Blaze is based on
// <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>.
//
//
// \n \section hpx_setup HPX Setup
// <hr>
//
// In order to enable the HPX-based parallelization, the following steps have to be taken: First,
// the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly specified during
// compilation:
\code
... -DBLAZE_USE_HPX_THREADS ...
\endcode
// Second, the HPX library and depending libraries such as Boost, hwloc, etc. have to be linked.
// And third, the HPX threads have to be initialized by a call to the \c hpx::init() function (see
// the <a href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HPX tutorial</a>
// for further details). These three actions will cause the \b Blaze library to automatically try
// to run all operations in parallel with the specified number of HPX threads.
//
// Note that the OpenMP-based, C++11 thread-based, and Boost thread-based parallelizations have
// priority, i.e. are preferred in case either is enabled in combination with the HPX thread
// parallelization.
//
// The number of threads used by the HPX backend has to be specified via the command line:
\code
... --hpx:threads 4 ...
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of HPX threads, the function will return the actual number of threads used by
// the HPX subsystem.
//
//
// \n \section hpx_configuration HPX Configuration
// <hr>
//
// As in case of the other shared memory parallelizations \b Blaze is not unconditionally running
// an operation in parallel (see for instance \ref openmp_parallelization). Only in case a given
// operation is large enough and exceeds a certain threshold the operation is executed in parallel.
// All thresholds related to the HPX-based parallelization are contained within the configuration
// file <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the HPX-based parallelization.
//
// \n Previous: \ref boost_threads_parallelization Next: \ref serial_execution
*/
//*************************************************************************************************
//**Serial Execution*******************************************************************************
/*!\page serial_execution Serial Execution
//
// Sometimes it may be necessary to enforce the serial execution of specific operations. For this
// purpose, the \b Blaze library offers three possible options: the serialization of a single
// expression via the \c serial() function, the serialization of a block of expressions via the
// \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution.
//
//
// \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression
// <hr>
//
// The first option is the serialization of a specific operation via the \c serial() function:
\code
blaze::DynamicMatrix<double> A, B, C;
// ... Resizing and initialization
C = serial( A + B );
\endcode
// \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any
// kind of dense or sparse vector or matrix expression.
//
//
// \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions
// <hr>
//
// The second option is the temporary and local enforcement of a serial execution via the
// \c BLAZE_SERIAL_SECTION:
\code
using blaze::rowMajor;
using blaze::columnVector;
blaze::DynamicMatrix<double,rowMajor> A;
blaze::DynamicVector<double,columnVector> b, c, d, x, y, z;
// ... Resizing and initialization
// Parallel execution
// If possible and beneficial for performance the following operation is executed in parallel.
x = A * b;
// Serial execution
// All operations executed within the serial section are guaranteed to be executed in
// serial (even if a parallel execution would be possible and/or beneficial).
BLAZE_SERIAL_SECTION
{
y = A * c;
z = A * d;
}
// Parallel execution continued
// ...
\endcode
// Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial.
// Outside the scope of the serial section, all operations are run in parallel (if beneficial for
// the performance).
//
// Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution.
// The use of the serial section within several concurrent threads will result in undefined behavior!
//
//
// \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution
// <hr>
//
// The third option is the general deactivation of the parallel execution (even in case OpenMP is
// enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION
// switch in the <tt>./blaze/config/SMP.h</tt> configuration file:
\code
#define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1
\endcode
// In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory
// parallelization is deactivated altogether.
//
// \n Previous: \ref hpx_parallelization Next: \ref serialization
*/
//*************************************************************************************************
//**Serialization**********************************************************************************
/*!\page serialization Serialization
//
// Sometimes it is necessary to store vector and/or matrices on disk, for instance for storing
// results or for sharing specific setups with other people. The \b Blaze math serialization
// module provides the according functionality to create platform independent, portable, binary
// representations of vectors and matrices that can be used to store the \b Blaze data structures
// without loss of precision and to reliably transfer them from one machine to another.
//
// The following two pages explain how to serialize vectors and matrices:
//
// - \ref vector_serialization
// - \ref matrix_serialization
//
// \n Previous: \ref serial_execution Next: \ref vector_serialization
*/
//*************************************************************************************************
//**Vector Serialization***************************************************************************
/*!\page vector_serialization Vector Serialization
//
// The following example demonstrates the (de-)serialization of dense and sparse vectors:
\code
using blaze::columnVector;
using blaze::rowVector;
// Serialization of both vectors
{
blaze::StaticVector<double,5UL,rowVector> d;
blaze::CompressedVector<int,columnVector> s;
// ... Resizing and initialization
// Creating an archive that writes into the file "vectors.blaze"
blaze::Archive<std::ofstream> archive( "vectors.blaze" );
// Serialization of both vectors into the same archive. Note that d lies before s!
archive << d << s;
}
// Reconstitution of both vectors
{
blaze::DynamicVector<double,rowVector> d1;
blaze::DynamicVector<int,rowVector> d2;
// Creating an archive that reads from the file "vectors.blaze"
blaze::Archive<std::ifstream> archive( "vectors.blaze" );
// Reconstituting the former d vector into d1. Note that it is possible to reconstitute
// the vector into a different kind of vector (StaticVector -> DynamicVector), but that
// the type of elements has to be the same.
archive >> d1;
// Reconstituting the former s vector into d2. Note that it is even possible to reconstitute
// a sparse vector as a dense vector (also the reverse is possible) and that a column vector
// can be reconstituted as row vector (and vice versa). Note however that also in this case
// the type of elements is the same!
archive >> d2;
}
\endcode
// The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can
// also be used for vectors with vector or matrix element type:
\code
// Serialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// ... Resizing and initialization
// Creating an archive that writes into the file "vector.blaze"
blaze::Archive<std::ofstream> archive( "vector.blaze" );
// Serialization of the vector into the archive
archive << vec;
}
// Deserialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// Creating an archive that reads from the file "vector.blaze"
blaze::Archive<std::ifstream> archive( "vector.blaze" );
// Reconstitution of the vector from the archive
archive >> vec;
}
\endcode
// As the examples demonstrate, the vector serialization offers an enormous flexibility. However,
// several actions result in errors:
//
// - vectors cannot be reconstituted as matrices (and vice versa)
// - the element type of the serialized and reconstituted vector must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticVector, its size must match the size of the serialized vector
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref serialization Next: \ref matrix_serialization
*/
//*************************************************************************************************
//**Matrix Serialization***************************************************************************
/*!\page matrix_serialization Matrix Serialization
//
// The serialization of matrices works in the same manner as the serialization of vectors. The
// following example demonstrates the (de-)serialization of dense and sparse matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
// Serialization of both matrices
{
blaze::StaticMatrix<double,3UL,5UL,rowMajor> D;
blaze::CompressedMatrix<int,columnMajor> S;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrices.blaze"
blaze::Archive<std::ofstream> archive( "matrices.blaze" );
// Serialization of both matrices into the same archive. Note that D lies before S!
archive << D << S;
}
// Reconstitution of both matrices
{
blaze::DynamicMatrix<double,rowMajor> D1;
blaze::DynamicMatrix<int,rowMajor> D2;
// Creating an archive that reads from the file "matrices.blaze"
blaze::Archive<std::ifstream> archive( "matrices.blaze" );
// Reconstituting the former D matrix into D1. Note that it is possible to reconstitute
// the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that
// the type of elements has to be the same.
archive >> D1;
// Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute
// a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major
// matrix can be reconstituted as row-major matrix (and vice versa). Note however that also
// in this case the type of elements is the same!
archive >> D2;
}
\endcode
// Note that also in case of matrices it is possible to (de-)serialize matrices with vector or
// matrix elements:
\code
// Serialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrix.blaze"
blaze::Archive<std::ofstream> archive( "matrix.blaze" );
// Serialization of the matrix into the archive
archive << mat;
}
// Deserialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// Creating an archive that reads from the file "matrix.blaze"
blaze::Archive<std::ifstream> archive( "matrix.blaze" );
// Reconstitution of the matrix from the archive
archive >> mat;
}
\endcode
// Note that just as the vector serialization, the matrix serialization is restricted by a
// few important rules:
//
// - matrices cannot be reconstituted as vectors (and vice versa)
// - the element type of the serialized and reconstituted matrix must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticMatrix, the number of rows and columns must match those
// of the serialized matrix
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref vector_serialization Next: \ref customization \n
*/
//*************************************************************************************************
//**Customization**********************************************************************************
/*!\page customization Customization
//
// Although \b Blaze tries to work out of the box for every possible setting, still it may be
// necessary to adapt the library to specific requirements. The following three pages explain
// how to customize the \b Blaze library to your own needs:
//
// - \ref configuration_files
// - \ref vector_and_matrix_customization
// - \ref error_reporting_customization
//
// \n Previous: \ref matrix_serialization Next: \ref configuration_files
*/
//*************************************************************************************************
//**Configuration Files****************************************************************************
/*!\page configuration_files Configuration Files
//
// \tableofcontents
//
//
// Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose
// \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory,
// which provide ample opportunity to customize internal settings, behavior, and thresholds.
// This chapter explains the most important of these configuration files. For a complete
// overview of all customization opportunities, please go to the configuration files in the
// <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation.
//
//
// \n \section transpose_flag Default Vector Storage
// <hr>
//
// The \b Blaze default is that all vectors are created as column vectors (if not specified
// explicitly):
\code
blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector
\endcode
// The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default
// vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library.
// The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro:
\code
#define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector
\endcode
// Alternatively the default transpose flag can be specified via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector
#include <blaze/Blaze.h>
\endcode
// Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector.
//
//
// \n \section storage_order Default Matrix Storage
// <hr>
//
// Matrices are by default created as row-major matrices:
\code
blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix
\endcode
// The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default
// matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order
// for all matrices of the \b Blaze library can be specified.
\code
#define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor
\endcode
// Alternatively the default storage order can be specified via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor
#include <blaze/Blaze.h>
\endcode
// Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor.
//
//
// \n \section blas_mode BLAS Mode
// <hr>
//
// In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can
// be configured to use a BLAS library. Via the following compilation switch in the configuration
// file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled:
\code
#define BLAZE_BLAS_MODE 1
\endcode
// In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL
// switch should be activated to prevent \b Blaze from parallelizing on its own:
\code
#define BLAZE_BLAS_IS_PARALLEL 1
\endcode
// Alternatively, both settings can be specified via command line or by defining the symbols
// manually before including any \b Blaze header file:
\code
#define BLAZE_BLAS_MODE 1
#define BLAZE_BLAS_IS_PARALLEL 1
#include <blaze/Blaze.h>
\endcode
// In case no BLAS library is available, \b Blaze will still work and will not be reduced in
// functionality, but performance may be limited.
//
//
// \n \section cache_size Cache Size
// <hr>
//
// The optimization of several \b Blaze compute kernels depends on the cache size of the target
// architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal
// speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE value
// in the <tt>./blaze/config/CacheSize.h</tt> configuration file:
\code
#define BLAZE_CACHE_SIZE 3145728UL
\endcode
// The cache size can also be specified via command line or by defining this symbol manually
// before including any \b Blaze header file:
\code
#define BLAZE_CACHE_SIZE 3145728UL
#include <blaze/Blaze.h>
\endcode
//
//
// \n \section vectorization Vectorization
// <hr>
//
// In order to achieve maximum performance and to exploit the compute power of a target platform
// the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or
// AVX-512 intrinsics, depending on which instruction set is available. However, it is possible
// to disable the vectorization entirely by the compile time switch in the configuration file
// <tt>./blaze/config/Vectorization.h</tt>:
\code
#define BLAZE_USE_VECTORIZATION 1
\endcode
// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
#define BLAZE_USE_VECTORIZATION 1
#include <blaze/Blaze.h>
\endcode
// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is
// disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for
// the operations. Note that deactivating the vectorization may pose a severe performance
// limitation for a large number of operations!
//
//
// \n \section thresholds Thresholds
// <hr>
//
// For many computations \b Blaze distinguishes between small and large vectors and matrices.
// This separation is especially important for the parallel execution of computations, since
// the use of several threads only pays off for sufficiently large vectors and matrices.
// Additionally, it also enables \b Blaze to select kernels that are optimized for a specific
// size.
//
// In order to distinguish between small and large data structures \b Blaze provides several
// thresholds that can be adapted to the characteristics of the target platform. For instance,
// the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom
// \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels
// for large multiplications. All thresholds, including the thresholds for the OpenMP- and
// thread-based parallelization, are contained within the configuration file
// <tt><blaze/config/Thresholds.h></tt>.
//
//
// \n \section padding Padding
// <hr>
//
// By default the \b Blaze library uses padding for all dense vectors and matrices in order to
// achieve maximum performance in all operations. Due to padding, the proper alignment of data
// elements can be guaranteed and the need for remainder loops is minimized. However, on the
// downside padding introduces an additional memory overhead, which can be large depending on
// the used data type.
//
// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate padding:
\code
#define BLAZE_USE_PADDING 1
\endcode
// Alternatively it is possible to (de-)activate padding via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_USE_PADDING 1
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if
// it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce
// the performance of all dense vector and matrix operations!
//
//
// \n \section streaming Streaming (Non-Temporal Stores)
// <hr>
//
// For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide
// a significant performance advantage of about 20%. However, this advantage is only in effect in
// case the memory bandwidth of the target architecture is maxed out. If the target architecture's
// memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance
// instead of increasing it.
//
// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate streaming:
\code
#define BLAZE_USE_STREAMING 1
\endcode
// Alternatively streaming can be (de-)activated via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
#define BLAZE_USE_STREAMING 1
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set to 0 streaming is
// disabled. It is recommended to consult the target architecture's white papers to decide whether
// streaming is beneficial or hurtful for performance.
//
//
// \n Previous: \ref customization Next: \ref vector_and_matrix_customization \n
*/
//*************************************************************************************************
//**Customization of Vectors and Matrices**********************************************************
/*!\page vector_and_matrix_customization Customization of Vectors and Matrices
//
// \tableofcontents
//
//
// \n \section custom_data_members Custom Data Members
// <hr>
//
// So far the \b Blaze library does not provide a lot of flexibility to customize the data
// members of existing \ref vector_types and \ref matrix_types. However, to some extent it is
// possible to customize vectors and matrices by inheritance. The following example gives an
// impression on how to create a simple variation of \ref matrix_types_custom_matrix, which
// automatically takes care of acquiring and releasing custom memory.
\code
template< typename Type // Data type of the matrix
, bool SO = defaultStorageOrder > // Storage order
class MyCustomMatrix
: public CustomMatrix< Type, unaligned, unpadded, SO >
{
public:
explicit inline MyCustomMatrix( size_t m, size_t n )
: CustomMatrix<Type,unaligned,unpadded,SO>()
, array_( new Type[m*n] )
{
this->reset( array_.get(), m, n );
}
private:
std::unique_ptr<Type[]> array_;
};
\endcode
// Please note that this is a simplified example with the intent to show the general approach.
// The number of constructors, the memory acquisition, and the kind of memory management can of
// course be adapted to specific requirements. Also, please note that since none of the \b Blaze
// vectors and matrices have virtual destructors polymorphic destruction cannot be used.
//
//
// \n \section custom_operations Custom Operations
// <hr>
//
// There are two approaches to extend \b Blaze with custom operations. First, the \c map()
// functions provide the possibility to execute componentwise custom operations on vectors and
// matrices. Second, it is possible to add customized free functions.
//
// \n \subsection custom_operations_map The map() Functions
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on vectors and matrices. The unary \c map() function can be used to apply a custom
// operation on each single element of a dense vector or matrix or each non-zero element of a
// sparse vector or matrix. For instance, the following example demonstrates a custom square
// root computation on a dense matrix:
\code
blaze::DynamicMatrix<double> A, B;
B = map( A, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense vectors or two dense matrices. The following example demonstrates the merging of
// two matrices of double precision values into a matrix of double precision complex numbers:
\code
blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } };
blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } };
blaze::DynamicMatrix< complex<double> > cplx;
// Creating the matrix
//    ( ( 2.1,  0.3) (-4.2,  1.4) )
//    ( ( 1.0,  2.9) ( 0.6, -3.4) )
cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// These examples demonstrate the most convenient way of defining a unary custom operation by
// passing a lambda to the \c map() function. Alternatively, it is possible to pass a custom
// functor:
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
};
B = map( A, Sqrt() );
\endcode
// In order for the functor to work in a call to \c map() it must define a function call operator,
// which accepts arguments of the type of the according vector or matrix elements.
//
// Although the operation is automatically parallelized depending on the size of the vector or
// matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load()
// function can be added to the functor, which handles the vectorized computation. Depending on
// the data type this function is passed one of the following \b Blaze SIMD data types:
//
// <ul>
// <li>SIMD data types for fundamental data types
// <ul>
// <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li>
// <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li>
// <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li>
// <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li>
// <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li>
// <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li>
// <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li>
// </ul>
// </li>
// <li>SIMD data types for complex data types
// <ul>
// <li>\c blaze::SIMDcint8: Packed SIMD type for complex 8-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint16: Packed SIMD type for complex 16-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint32: Packed SIMD type for complex 32-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint64: Packed SIMD type for complex 64-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcfloat: Packed SIMD type for complex single precision floating point data</li>
// <li>\c blaze::SIMDcdouble: Packed SIMD type for complex double precision floating point data</li>
// </ul>
// </li>
// </ul>
//
// All SIMD types provide the \c value data member for a direct access to the underlying intrinsic
// data element. In the following example, this intrinsic element is passed to the AVX function
// \c _mm256_sqrt_pd():
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
SIMDdouble load( const SIMDdouble& a ) const
{
return _mm256_sqrt_pd( a.value );
}
};
\endcode
// In this example, whenever vectorization is generally applicable, the \c load() function is
// called instead of the function call operator for as long as the number of remaining elements
// is larger-or-equal to the width of the packed SIMD type. In all other cases (which also
// includes peel-off and remainder loops) the scalar operation is used.
//
// Please note that this example has two drawbacks: First, it will only compile in case the
// intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the
// availability of AVX is not taken into account. The first drawback can be alleviated by making
// the \c load() function a function template. The second drawback can be dealt with by adding a
// \c simdEnabled() function template to the functor:
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
template< typename T >
T load( const T& a ) const
{
return _mm256_sqrt_pd( a.value );
}
template< typename T >
static constexpr bool simdEnabled() {
#if defined(__AVX__)
return true;
#else
return false;
#endif
}
};
\endcode
// The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether
// or not vectorization is available for the given data type \c T. In case the function returns
// \c true, the \c load() function is used for a vectorized evaluation, in case the function
// returns \c false, \c load() is not called.
//
// Note that this is a simplified example that is only working when used for dense vectors and
// matrices with double precision floating point elements. The following code shows the complete
// implementation of the according functor that is used within the \b Blaze library. The \b Blaze
// \c Sqrt functor is working for all data types that are providing a square root operation:
\code
namespace blaze {
struct Sqrt
{
template< typename T >
BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const
{
return sqrt( a );
}
template< typename T >
static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; }
template< typename T >
BLAZE_ALWAYS_INLINE auto load( const T& a ) const
{
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T );
return sqrt( a );
}
};
} // namespace blaze
\endcode
// The same approach can be taken for binary custom operations. The following code demonstrates
// the \c Min functor of the \b Blaze library, which is working for all data types that provide
// a \c min() operation:
\code
struct Min
{
explicit inline Min()
{}
template< typename T1, typename T2 >
BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const
{
return min( a, b );
}
template< typename T1, typename T2 >
static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; }
template< typename T1, typename T2 >
BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const
{
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 );
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 );
return min( a, b );
}
};
\endcode
// For more information on the available \b Blaze SIMD data types and functions, please see the
// SIMD module in the complete \b Blaze documentation.
//
// \n \subsection custom_operations_free_functions Free Functions
//
// In order to extend \b Blaze with new functionality it is possible to add free functions. Free
// functions can be used either as wrappers around calls to the map() function or to implement
// general, non-componentwise operations. The following two examples will demonstrate both ideas.
//
// The first example shows the \c setToZero() function, which resets a sparse matrix to zero
// without affecting the sparsity pattern. It is implemented as a convenience wrapper around
// the map() function:
\code
template< typename MT // Type of the sparse matrix
, bool SO > // Storage order
void setToZero( blaze::SparseMatrix<MT,SO>& mat )
{
(~mat) = blaze::map( ~mat, []( int ){ return 0; } );
}
\endcode
// The blaze::SparseMatrix class template is the base class for all kinds of sparse matrices and
// provides an abstraction from the actual type \c MT of the sparse matrix. However, due to the
// <a href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">Curiously Recurring Template Pattern (CRTP)</a>
// it also enables a conversion back to the actual type. This downcast is performed via the tilde
// operator (i.e. \c operator~()). The template parameter \c SO represents the storage order
// (blaze::rowMajor or blaze::columnMajor) of the matrix.
//
// The second example shows the \c countZeros() function, which counts the number of values, which
// are exactly zero, in a dense, row-major matrix:
\code
template< typename MT >
size_t countZeros( blaze::DenseMatrix<MT,rowMajor>& mat )
{
const size_t M( (~mat).rows() );
const size_t N( (~mat).columns() );
size_t count( 0UL );
for( size_t i=0UL; i<M; ++i ) {
for( size_t j=0UL; j<N; ++j ) {
if( blaze::isDefault<strict>( (~mat)(i,j) ) )
++count;
}
}
return count;
}
\endcode
// The blaze::DenseMatrix class template is the base class for all kinds of dense matrices. Again,
// it is possible to perform the conversion to the actual type via the tilde operator.
//
// The following two listings show the declarations of all vector and matrix base classes, which
// can be used for custom free functions:
\code
template< typename VT // Concrete type of the dense or sparse vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class Vector;
template< typename VT // Concrete type of the dense vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class DenseVector;
template< typename VT // Concrete type of the sparse vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class SparseVector;
\endcode
\code
template< typename MT // Concrete type of the dense or sparse matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class Matrix;
template< typename MT // Concrete type of the dense matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class DenseMatrix;
template< typename MT // Concrete type of the sparse matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class SparseMatrix;
\endcode
//
//
// \n \section custom_data_types Custom Data Types
// <hr>
//
// The \b Blaze library tries hard to make the use of custom data types as convenient, easy and
// intuitive as possible. However, unfortunately it is not possible to meet the requirements of
// all possible data types. Thus it might be necessary to provide \b Blaze with some additional
// information about the data type. The following sections give an overview of the necessary steps
// to enable the use of the hypothetical custom data type \c custom::double_t for vector and
// matrix operations. For example:
\code
blaze::DynamicVector<custom::double_t> a, b, c;
// ... Resizing and initialization
c = a + b;
\endcode
// The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+()
// for additions, \c operator-() for subtractions, \c operator*() for multiplications and
// \c operator/() for divisions. If any of these functions is missing it is necessary to implement
// the operator to perform the according operation. For this example we assume that the custom
// data type provides the four following functions instead of operators:
\code
namespace custom {
double_t add ( const double_t& a, const double_t& b );
double_t sub ( const double_t& a, const double_t& b );
double_t mult( const double_t& a, const double_t& b );
double_t div ( const double_t& a, const double_t& b );
} // namespace custom
\endcode
// The following implementations will satisfy the requirements of the \b Blaze library:
\code
inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b )
{
return add( a, b );
}
inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b )
{
return sub( a, b );
}
inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b )
{
return mult( a, b );
}
inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b )
{
return div( a, b );
}
\endcode
// \b Blaze will use all the information provided with these functions (for instance the return
// type) to properly handle the operations. In the rare case that the return type cannot be
// automatically determined from the operator it might be additionally necessary to provide a
// specialization of the following four \b Blaze class templates:
\code
namespace blaze {
template<>
struct AddTrait<custom::double_t,custom::double_t> {
using Type = custom::double_t;
};
template<>
struct SubTrait<custom::double_t,custom::double_t> {
using Type = custom::double_t;
};
template<>
struct MultTrait<custom::double_t,custom::double_t> {
using Type = custom::double_t;
};
template<>
struct DivTrait<custom::double_t,custom::double_t> {
using Type = custom::double_t;
};
} // namespace blaze
\endcode
// The same steps are necessary if several custom data types need to be combined (as for instance
// \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to
// be taken into account:
\code
custom::double_t operator+( const custom::double_t& a, const custom::float_t& b );
custom::double_t operator+( const custom::float_t& a, const custom::double_t& b );
// ...
\endcode
// Please note that only built-in data types apply for vectorization and thus custom data types
// cannot achieve maximum performance!
//
//
// \n Previous: \ref configuration_files Next: \ref error_reporting_customization \n
*/
//*************************************************************************************************
//**Customization of the Error Reporting Mechanism*************************************************
/*!\page error_reporting_customization Customization of the Error Reporting Mechanism
//
// \tableofcontents
//
//
// \n \section error_reporting_background Background
// <hr>
//
// The default way of \b Blaze to report errors of any kind is to throw a standard exception.
// However, although in general this approach works well, in certain environments and under
// special circumstances exceptions may not be the mechanism of choice and a different error
// reporting mechanism may be desirable. For this reason, \b Blaze provides several macros,
// which enable the customization of the error reporting mechanism. Via these macros it is
// possible to replace the standard exceptions by some other exception type or a completely
// different approach to report errors.
//
//
// \n \section error_reporting_general_customization Customization of the Reporting Mechanism
// <hr>
//
// In some cases it might be necessary to adapt the entire error reporting mechanism and to
// replace it by some other means to signal failure. The primary macro for this purpose is the
// \c BLAZE_THROW macro:
\code
#define BLAZE_THROW( EXCEPTION ) \
throw EXCEPTION
\endcode
// This macro represents the default mechanism of the \b Blaze library to report errors of any
// kind. In order to customize the error reporting mechanism all that needs to be done is to
// define the macro prior to including any \b Blaze header file. This will cause the \b Blaze
// specific mechanism to be overridden. The following example demonstrates this by replacing
// exceptions by a call to a \c log() function and a direct call to abort:
\code
#define BLAZE_THROW( EXCEPTION ) \
log( "..." ); \
abort()
#include <blaze/Blaze.h>
\endcode
// Doing this will trigger a call to \c log() and an abort instead of throwing an exception
// whenever an error (such as an invalid argument) is detected.
//
// \note It is possible to execute several statements instead of executing a single statement to
// throw an exception. Also note that it is recommended to define the macro such that a subsequent
// semicolon is required!
//
// \warning This macro is provided with the intention to assist in adapting \b Blaze to special
// conditions and environments. However, the customization of the error reporting mechanism via
// this macro can have a significant effect on the library. Thus be advised to use the macro
// with due care!
//
//
// \n \section error_reporting_exception_customization Customization of the Type of Exceptions
// <hr>
//
// In addition to the customization of the entire error reporting mechanism it is also possible
// to customize the type of exceptions being thrown. This can be achieved by customizing any
// number of the following macros:
\code
#define BLAZE_THROW_BAD_ALLOC \
BLAZE_THROW( std::bad_alloc() )
#define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \
BLAZE_THROW( std::logic_error( MESSAGE ) )
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( std::invalid_argument( MESSAGE ) )
#define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \
BLAZE_THROW( std::length_error( MESSAGE ) )
#define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \
BLAZE_THROW( std::out_of_range( MESSAGE ) )
#define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \
BLAZE_THROW( std::runtime_error( MESSAGE ) )
\endcode
// In order to customize the type of exception the according macro has to be defined prior to
// including any \b Blaze header file. This will override the \b Blaze default behavior. The
// following example demonstrates this by replacing \c std::invalid_argument by a custom
// exception type:
\code
class InvalidArgument
{
public:
InvalidArgument();
explicit InvalidArgument( const std::string& message );
// ...
};
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( InvalidArgument( MESSAGE ) )
#include <blaze/Blaze.h>
\endcode
// By manually defining the macro, an \c InvalidArgument exception is thrown instead of a
// \c std::invalid_argument exception. Note that it is recommended to define the macro such
// that a subsequent semicolon is required!
//
// \warning These macros are provided with the intention to assist in adapting \b Blaze to
// special conditions and environments. However, the customization of the type of an exception
// via this macro may have an effect on the library. Thus be advised to use the macro with due
// care!
//
//
// \n \section error_reporting_special_errors Customization of Special Errors
// <hr>
//
// Last but not least it is possible to customize the error reporting for special kinds of errors.
// This can be achieved by customizing any number of the following macros:
\code
#define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \
BLAZE_THROW_RUNTIME_ERROR( MESSAGE )
#define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \
BLAZE_THROW_RUNTIME_ERROR( MESSAGE )
\endcode
// As explained in the previous sections, in order to customize the handling of special errors
// the according macro has to be defined prior to including any \b Blaze header file. This will
// override the \b Blaze default behavior.
//
//
// \n Previous: \ref vector_and_matrix_customization Next: \ref blas_functions \n
*/
//*************************************************************************************************
//**BLAS Functions*********************************************************************************
/*!\page blas_functions BLAS Functions
//
// \tableofcontents
//
//
// For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices
// \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements
// several convenient C++ wrapper functions for several BLAS functions. The following sections
// give a complete overview of all available BLAS level 1, 2 and 3 functions.
//
//
// \n \section blas_level_1 BLAS Level 1
// <hr>
//
// \subsection blas_level_1_dotu Dot Product (dotu)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()):
\code
namespace blaze {
float dotu( int n, const float* x, int incX, const float* y, int incY );
double dotu( int n, const double* x, int incX, const double* y, int incY );
complex<float> dotu( int n, const complex<float>* x, int incX,
const complex<float>* y, int incY );
complex<double> dotu( int n, const complex<double>* x, int incX,
const complex<double>* y, int incY );
template< typename VT1, bool TF1, typename VT2, bool TF2 >
ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y );
} // namespace blaze
\endcode
// \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(),
// and \c zdotc_sub()):
\code
namespace blaze {
float dotc( int n, const float* x, int incX, const float* y, int incY );
double dotc( int n, const double* x, int incX, const double* y, int incY );
complex<float> dotc( int n, const complex<float>* x, int incX,
const complex<float>* y, int incY );
complex<double> dotc( int n, const complex<double>* x, int incX,
const complex<double>* y, int incY );
template< typename VT1, bool TF1, typename VT2, bool TF2 >
ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y );
} // namespace blaze
\endcode
// \subsection blas_level_1_axpy Axpy Product (axpy)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()):
\code
namespace blaze {
void axpy( int n, float alpha, const float* x, int incX, float* y, int incY );
void axpy( int n, double alpha, const double* x, int incX, double* y, int incY );
void axpy( int n, complex<float> alpha, const complex<float>* x,
int incX, complex<float>* y, int incY );
void axpy( int n, complex<double> alpha, const complex<double>* x,
int incX, complex<double>* y, int incY );
template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST >
void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha );
} // namespace blaze
\endcode
// \n \section blas_level_2 BLAS Level 2
// <hr>
//
// \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()):
\code
namespace blaze {
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha,
const float* A, int lda, const float* x, int incX,
float beta, float* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha,
const double* A, int lda, const double* x, int incX,
double beta, double* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha,
const complex<float>* A, int lda, const complex<float>* x, int incX,
complex<float> beta, complex<float>* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha,
const complex<double>* A, int lda, const complex<double>* x, int incX,
complex<double> beta, complex<double>* y, int incY );
template< typename VT1, typename MT1, bool SO, typename VT2, typename ST >
void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A,
const DenseVector<VT2,false>& x, ST alpha, ST beta );
template< typename VT1, typename VT2, typename MT1, bool SO, typename ST >
void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x,
const DenseMatrix<MT1,SO>& A, ST alpha, ST beta );
} // namespace blaze
\endcode
// \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(),
// and \c ztrmv()):
\code
namespace blaze {
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const float* A, int lda, float* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const double* A, int lda, double* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const complex<float>* A, int lda, complex<float>* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const complex<double>* A, int lda, complex<double>* x, int incX );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
} // namespace blaze
\endcode
// \n \section blas_level_3 BLAS Level 3
// <hr>
//
// \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()):
\code
namespace blaze {
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, float alpha, const float* A, int lda,
const float* B, int ldb, float beta, float* C, int ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, double alpha, const double* A, int lda,
              const double* B, int ldb, double beta, double* C, int ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda,
              const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda,
              const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST >
void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A,
const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta );
} // namespace blaze
\endcode
// \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and
// \c ztrmm()):
\code
namespace blaze {
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, float alpha, const float* A,
int lda, float* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, double alpha, const double* A,
int lda, double* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A,
int lda, complex<float>* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A,
int lda, complex<double>* B, int ldb );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
} // namespace blaze
\endcode
// \n \subsection blas_level_3_trsm Triangular System Solver (trsm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for solving
// a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()):
\code
namespace blaze {
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, float alpha, const float* A,
int lda, float* B, int ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, double alpha, const double* A,
int lda, double* B, int ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A,
int lda, complex<float>* B, int ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A,
int lda, complex<double>* B, int ldb );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
} // namespace blaze
\endcode
// \n Previous: \ref error_reporting_customization Next: \ref lapack_functions \n
*/
//*************************************************************************************************
//**LAPACK Functions*******************************************************************************
/*!\page lapack_functions LAPACK Functions
//
// \tableofcontents
//
//
// \n \section lapack_introction Introduction
// <hr>
//
// The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks
// (including the decomposition, inversion and the computation of the determinant of dense matrices).
// For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required
// LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper
// functions. For more details on the individual LAPACK functions see the \b Blaze function
// documentation or the LAPACK online documentation browser:
//
// http://www.netlib.org/lapack/explore-html/
//
// Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. They
// provide the parameters of the original LAPACK functions and thus provide maximum flexibility:
\code
constexpr size_t N( 100UL );
blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
// ... Initializing the matrix
const int m ( numeric_cast<int>( A.rows() ) ); // == N
const int n ( numeric_cast<int>( A.columns() ) ); // == N
const int lda ( numeric_cast<int>( A.spacing() ) ); // >= N
const int lwork( n*lda );
const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required
   const std::unique_ptr<double[]> work( new double[lwork] );  // Workspace of 'lwork' elements; no initialization required
int info( 0 );
getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info'
getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info'
\endcode
// Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These
// wrappers provide a maximum of convenience:
\code
constexpr size_t N( 100UL );
blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
// ... Initializing the matrix
const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required
getrf( A, ipiv.get() ); // Cannot fail
getri( A, ipiv.get() ); // Reports failure via exception
\endcode
// \note All functions only work for general, non-adapted matrices with \c float, \c double,
// \c complex<float>, or \c complex<double> element type. The attempt to call the function with
// adaptors or matrices of any other element type results in a compile time error!
//
// \note All functions can only be used if a fitting LAPACK library is available and linked to
// the final executable. Otherwise a call to this function will result in a linker error.
//
// \note For performance reasons all functions do only provide the basic exception safety guarantee,
// i.e. in case an exception is thrown the given matrix may already have been modified.
//
//
// \n \section lapack_decomposition Matrix Decomposition
// <hr>
//
// The following functions decompose/factorize the given dense matrix. Based on this decomposition
// the matrix can be inverted or used to solve a linear system of equations.
//
//
// \n \subsection lapack_lu_decomposition LU Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(),
// \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix:
\code
namespace blaze {
void getrf( int m, int n, float* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, double* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info );
template< typename MT, bool SO >
void getrf( DenseMatrix<MT,SO>& A, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = P \cdot L \cdot U, \f]\n
// where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper
// triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major
// matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit
// diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is
// transposed.
//
// \note The LU decomposition will never fail, even for singular matrices. However, in case of a
// singular matrix the resulting decomposition cannot be used for a matrix inversion or solving
// a linear system of equations.
//
//
// \n \subsection lapack_ldlt_decomposition LDLT Decomposition
//
// The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(),
// \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given
// symmetric indefinite matrix:
\code
namespace blaze {
void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info );
void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info );
void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );
void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or }
A = L D L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_ldlh_decomposition LDLH Decomposition
//
// The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(),
// which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix:
\code
namespace blaze {
void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );
void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or }
A = L D L^{H} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_llh_decomposition Cholesky Decomposition
//
// The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(),
// \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given
// positive definite matrix:
\code
namespace blaze {
void potrf( char uplo, int n, float* A, int lda, int* info );
void potrf( char uplo, int n, double* A, int lda, int* info );
void potrf( char uplo, int n, complex<float>* A, int lda, int* info );
void potrf( char uplo, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void potrf( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U^{T} U \texttt{ (if uplo = 'U'), or }
A = L L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky
// decomposition fails if the given matrix \a A is not a positive definite matrix. In this case
// a \a std::invalid_argument exception is thrown.
//
//
// \n \subsection lapack_qr_decomposition QR Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(),
// \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix:
\code
namespace blaze {
void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot R, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the
// min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n);
// the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as
// a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(),
// \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition:
\code
namespace blaze {
void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(),
// \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from
// a QR decomposition:
\code
namespace blaze {
void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormqr( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
template< typename MT1, bool SO, typename MT2 >
void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_rq_decomposition RQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(),
// \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix:
\code
namespace blaze {
void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = R \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and
// <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>,
// and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray
// <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case
// \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n
// upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau
// represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(),
// \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from an RQ decomposition:
\code
namespace blaze {
void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(),
// \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from
// an RQ decomposition:
\code
namespace blaze {
void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
void unmrq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
template< typename MT1, bool SO, typename MT2 >
void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_ql_decomposition QL Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(),
// \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix:
\code
namespace blaze {
void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot L, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and
// <tt>v(m-k+i) = 1</tt>. <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>,
// and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray
// A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n,
// the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower
// trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent
// the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(),
// \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from a QL decomposition:
\code
namespace blaze {
void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(),
// \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from
// a QL decomposition:
\code
namespace blaze {
void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
template< typename MT1, bool SO, typename MT2 >
void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_lq_decomposition LQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(),
// \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix:
\code
namespace blaze {
void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = L \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the
// \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n);
// the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q
// as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(),
// \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition:
\code
namespace blaze {
void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(),
// \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from
// an LQ decomposition:
\code
namespace blaze {
void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
template< typename MT1, bool SO, typename MT2 >
void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \section lapack_inversion Matrix Inversion
// <hr>
//
// Given a matrix that has already been decomposed, the following functions can be used to invert
// the matrix in-place.
//
//
// \n \subsection lapack_lu_inversion LU-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(),
// \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by
// an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info );
void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info );
void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info );
void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void getri( DenseMatrix<MT,SO>& A, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_inversion LDLT-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(),
// \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been
// decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info );
void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info );
void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info );
void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info );
template< typename MT, bool SO >
void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_inversion LDLH-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c chetri() and
// \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by
// an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info );
void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info );
template< typename MT, bool SO >
void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the third function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_inversion Cholesky-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(),
// \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been
// decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potri( char uplo, int n, float* A, int lda, int* info );
void potri( char uplo, int n, double* A, int lda, int* info );
void potri( char uplo, int n, complex<float>* A, int lda, int* info );
void potri( char uplo, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void potri( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(),
// \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place:
\code
namespace blaze {
void trtri( char uplo, char diag, int n, float* A, int lda, int* info );
void trtri( char uplo, char diag, int n, double* A, int lda, int* info );
void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info );
void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_substitution Substitution
// <hr>
//
// Given a matrix that has already been decomposed the following functions can be used to perform
// the forward/backward substitution step to compute the solution to a system of linear equations.
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is a n-by-n matrix that has already been
// factorized by the according decomposition function, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \n \subsection lapack_lu_substitution LU-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(),
// \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has
// already been decomposed by an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info );
   void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
   void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_substitution LDLT-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(),
// \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite
// matrix that has already been decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_substitution LDLH-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(),
// which perform the substitution step for an Hermitian indefinite matrix that has already been
// decomposed by an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first two functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_substitution Cholesky-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(),
// \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix
// that has already been decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(),
// \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix:
\code
namespace blaze {
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_linear_system_solver Linear System Solver
// <hr>
//
// The following functions represent compound functions that perform both the decomposition step
// as well as the substitution step to compute the solution to a system of linear equations. Note
// that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is a n-by-n matrix that has already been
// factorized by the according decomposition function, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \subsection lapack_lu_linear_system_solver LU-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(),
// \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according
// \ref lapack_lu_substitution :
\code
namespace blaze {
void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info );
void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info );
void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info );
void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_lu_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(),
// \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according
// \ref lapack_ldlt_substitution :
\code
namespace blaze {
void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlt_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c chesv() and
// \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according
// \ref lapack_ldlh_substitution :
\code
namespace blaze {
void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );
void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(),
// \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according
// \ref lapack_llh_substitution :
\code
namespace blaze {
void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_llh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(),
// \c ctrsv(), and \c ztrsv():
\code
namespace blaze {
void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX );
template< typename MT, bool SO, typename VT, bool TF >
void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b contains the solution of the linear
// system of equations.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N'.
//
// The last function throws a \a std::invalid_argument exception in case of an error. Note that
// none of the functions does perform any test for singularity or near-singularity. Such tests
// must be performed prior to calling this function!
//
//
// \n \section lapack_eigenvalues Eigenvalues/Eigenvectors
//
// \subsection lapack_eigenvalues_general General Matrices
//
// The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(),
// \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of
// the given general matrix:
\code
namespace blaze {
void geev( char jobvl, char jobvr, int n, float* A, int lda, float* wr, float* wi, float* VL, int ldvl, float* VR, int ldvr, float* work, int lwork, int* info );
void geev( char jobvl, char jobvr, int n, double* A, int lda, double* wr, double* wi, double* VL, int ldvl, double* VR, int ldvr, double* work, int lwork, int* info );
void geev( char jobvl, char jobvr, int n, complex<float>* A, int lda, complex<float>* w, complex<float>* VL, int ldvl, complex<float>* VR, int ldvr, complex<float>* work, int lwork, float* rwork, int* info );
void geev( char jobvl, char jobvr, int n, complex<double>* A, int lda, complex<double>* w, complex<double>* VL, int ldvl, complex<double>* VR, int ldvr, complex<double>* work, int lwork, double* rwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF >
void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, bool SO3 >
void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR );
} // namespace blaze
\endcode
// The complex eigenvalues of the given matrix \a A are returned in the given vector \a w.
// Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs
// of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part
// first.
//
// If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR
// in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major
// matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies
\f[ A * v[j] = lambda[j] * v[j], \f]
// where \f$lambda[j]\f$ is its eigenvalue.
//
// If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL
// in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major
// matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies
\f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f]
// where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$.
//
// \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The
// functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given matrix \a VL is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match;
// - ... the eigenvalue computation fails.
//
// The first four functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices
//
// The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(),
// which compute the eigenvalues and eigenvectors of the given symmetric matrix:
\code
namespace blaze {
void syev( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* info );
void syev( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a
// divide-and-conquer strategy for the computation of the left and right eigenvectors:
\code
namespace blaze {
void syevd( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* iwork, int liwork, int* info );
void syevd( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* iwork, int liwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized
// to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left
// eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right
// eigenvectors are returned in the columns of \a A.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given \a jobz argument is neither \c 'V' nor \c 'N';
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last function throws an
// exception in case of an error.
//
// Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it
// is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix:
\code
namespace blaze {
void syevx( char jobz, char range, char uplo, int n, float* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, float* Z, int ldz, float* work, int lwork, int* iwork, int* ifail, int* info );
void syevx( char jobz, char range, char uplo, int n, double* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, double* Z, int ldz, double* work, int lwork, int* iwork, int* ifail, int* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST >
size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp );
} // namespace blaze
\endcode
// The number of eigenvalues to be computed is specified by the lower bound \c low and the upper
// bound \c upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all eigenvalues in the
// index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending
// order in the given vector \a w, which is either resized (if possible) or expected to be a
// \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is
// row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is
// resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num
// column-major matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all eigenvalues
// in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in
// ascending order in the given vector \a w, which is either resized (if possible) or expected
// to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case
// \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix.
// \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match;
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices
//
// The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(),
// which compute the eigenvalues and eigenvectors of the given Hermitian matrix:
\code
namespace blaze {
void heev( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int* info );
   void heev( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a
// divide-and-conquer strategy for the computation of the left and right eigenvectors:
\code
namespace blaze {
   void heevd( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int lrwork, int* iwork, int liwork, int* info );
   void heevd( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int lrwork, int* iwork, int liwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized
// to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left
// eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right
// eigenvectors are returned in the columns of \a A.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given \a jobz argument is neither \c 'V' nor \c 'N';
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last function throws an
// exception in case of an error.
//
// Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it
// is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix:
\code
namespace blaze {
void heevx( char jobz, char range, char uplo, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, complex<float>* Z, int ldz, complex<float>* work, int lwork, float* rwork, int* iwork, int* ifail, int* info );
void heevx( char jobz, char range, char uplo, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, complex<double>* Z, int ldz, complex<double>* work, int lwork, double* rwork, int* iwork, int* ifail, int* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST >
size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp );
} // namespace blaze
\endcode
// The number of eigenvalues to be computed is specified by the lower bound \c low and the upper
// bound \c upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all eigenvalues in the
// index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending
// order in the given vector \a w, which is either resized (if possible) or expected to be a
// \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is
// a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is
// resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num
// column-major matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all eigenvalues
// in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in
// ascending order in the given vector \a w, which is either resized (if possible) or expected
// to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case
// \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix.
// \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match;
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \section lapack_singular_values Singular Values/Singular Vectors
//
// The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(),
// \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given
// general matrix:
\code
namespace blaze {
void gesvd( char jobu, char jobv, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* info );
void gesvd( char jobu, char jobv, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* info );
void gesvd( char jobu, char jobv, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* info );
void gesvd( char jobu, char jobv, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2 >
void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). In contrast to the \c gesvd()
// functions they compute the singular value decomposition (SVD) of the given general matrix by
// applying a divide-and-conquer strategy for the computation of the left and right singular
// vectors:
\code
namespace blaze {
void gesdd( char jobz, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info );
void gesdd( char jobz, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info );
void gesdd( char jobz, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info );
void gesdd( char jobz, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz );
} // namespace blaze
\endcode
// The resulting decomposition has the form
\f[ A = U \cdot S \cdot V, \f]
// where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal
// elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal
// matrix. The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n)
// columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively.
//
// The resulting min(\a m,\a n) real and non-negative singular values are returned in descending
// order in the vector \a s, which is resized to the correct size (if possible and necessary).
//
// Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(),
// \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or
// vectors:
\code
namespace blaze {
void gesvdx( char jobu, char jobv, char range, int m, int n, float* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info );
void gesvdx( char jobu, char jobv, char range, int m, int n, double* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info );
void gesvdx( char jobu, char jobv, char range, int m, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info );
void gesvdx( char jobu, char jobv, char range, int m, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2 >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp );
} // namespace blaze
\endcode
// The number of singular values to be computed is specified by the lower bound \a low and the
// upper bound \a upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all singular values
// in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored
// in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V,
// which is either resized (if possible) or expected to be a \a num-by-\a n matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all singular values
// in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are
// stored in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given
// matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n
// matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a U is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a s is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the given scalar values don't form a proper range;
// - ... the singular value decomposition fails.
//
// The first four functions report failure via the \c info argument, the remaining functions throw
// an exception in case of an error.
//
//
// \n Previous: \ref blas_functions Next: \ref block_vectors_and_matrices \n
*/
//*************************************************************************************************
//**Block Vectors and Matrices*********************************************************************
/*!\page block_vectors_and_matrices Block Vectors and Matrices
//
// \tableofcontents
//
//
// \n \section block_vectors_and_matrices_general General Concepts
// <hr>
//
// In addition to fundamental element types, the \b Blaze library supports vectors and matrices
// with non-fundamental element type. For instance, it is possible to define block matrices by
// using a matrix type as the element type:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A;
DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y;
// ... Resizing and initialization
y = A * x;
\endcode
// The matrix/vector multiplication in this example runs fully parallel and uses vectorization
// for every inner matrix/vector multiplication and vector addition.
//
//
// \n \section block_vectors_and_matrices_pitfalls Pitfalls
// <hr>
//
// The only thing to keep in mind when using non-fundamental element types is that all operations
// between the elements have to be well defined. More specifically, the size of vector and matrix
// elements has to match. The attempt to combine two non-matching elements results in either a
// compilation error (in case of statically sized elements) or an exception (for dynamically sized
// elements):
\code
DynamicVector< StaticVector<int,2UL> > a;
DynamicVector< StaticVector<int,3UL> > b;
DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match
\endcode
// Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector,
// \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized
// accordingly upfront.
//
//
// \n \section block_vectors_and_matrices_examples Examples
// <hr>
//
// The first example demonstrates the multiplication between a statically sized block matrix
// and a block vector:
\code
using namespace blaze;
// ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) )
// ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) )
// ( ) * ( ) = ( )
// ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) )
// ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) )
using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>;
using V2 = StaticVector<int,2UL,columnVector>;
DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) },
{ M2x2(3), M2x2(4) } };
DynamicVector<V2,columnVector> x{ V2(1), V2(2) };
DynamicVector<V2,columnVector> y( A * x );
\endcode
// The second example shows the multiplication between a compressed block matrix with blocks of
// varying size and a compressed block vector:
\code
using namespace blaze;
// ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) )
// ( ( 4 1 0 ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) )
// ( ( 0 2 4 ) ( 3 1 ) ) ( ( 1 ) ) ( ( 3 ) )
// ( ) ( ) ( )
// ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) )
// ( ) ( ) ( )
// ( ( 0 -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) )
// ( ( 2 -1 2 ) ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) )
using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>;
using V3 = HybridVector<int,3UL,columnVector>;
CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL );
A(0,0) = M3x3{ { 1, -2, 3 }, { 4, 1, 0 }, { 0, 2, 4 } };
A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 } };
A(1,1) = M3x3{ { 1 } };
A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } };
A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } };
CompressedVector<V3,columnVector> x( 3UL, 3UL );
x[0] = V3{ 1, 0, 1 };
x[1] = V3{ 2 };
x[2] = V3{ -1, 2 };
CompressedVector<V3,columnVector> y( A * x );
\endcode
// \n Previous: \ref lapack_functions Next: \ref intra_statement_optimization \n
*/
//*************************************************************************************************
//**Intra-Statement Optimization*******************************************************************
/*!\page intra_statement_optimization Intra-Statement Optimization
//
// One of the prime features of the \b Blaze library is the automatic intra-statement optimization.
// In order to optimize the overall performance of every single statement \b Blaze attempts to
// rearrange the operands based on their types. For instance, the following addition of dense and
// sparse vectors
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1 + d2;
\endcode
// is automatically rearranged and evaluated as
\code
// ...
d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged
\endcode
// This order of operands is highly favorable for the overall performance since the addition of
// the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized
// fashion.
//
// This intra-statement optimization can have a tremendous effect on the performance of a statement.
// Consider for instance the following computation:
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = A * B * x;
\endcode
// Since multiplications are evaluated from left to right, this statement would result in a
// matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the
// right subexpression is evaluated first, the performance can be dramatically improved since the
// matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication.
// The \b Blaze library exploits this by automatically restructuring the expression such that the
// right multiplication is evaluated first:
\code
// ...
y = A * ( B * x );
\endcode
// Note however that although this intra-statement optimization may result in a measurable or
// even significant performance improvement, this behavior may be undesirable for several reasons,
// for instance because of numerical stability. Therefore, in case the order of evaluation matters,
// the best solution is to be explicit and to separate a statement into several statements:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ...
d3 += d2; // ... and afterwards add the second dense vector
\endcode
\code
// ...
blaze::DynamicMatrix<double> A, B, C;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
C = A * B; // Compute the left-hand side matrix-matrix multiplication first ...
y = C * x; // ... before the right-hand side matrix-vector multiplication
\endcode
// Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + eval( s1 + d2 );
\endcode
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = eval( A * B ) * x;
\endcode
// \n Previous: \ref block_vectors_and_matrices
*/
//*************************************************************************************************
#endif
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
/** \internal */
/** \internal
  * Single access point for the user-configurable thread count.
  * With \c SetAction, *v is stored as the new maximum; with \c GetAction,
  * *v receives either the stored value or, if none was set, the OpenMP
  * default (1 when OpenMP is unavailable). Any other action is a bug.
  */
inline void ei_manage_multi_threading(Action action, int* v)
{
  // Persists across calls; -1 means "never set by the user".
  static int m_maxThreads = -1;

  switch(action)
  {
    case SetAction:
      ei_internal_assert(v!=0);
      m_maxThreads = *v;
      break;

    case GetAction:
      ei_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
      *v = (m_maxThreads>0) ? m_maxThreads : omp_get_max_threads();
#else
      *v = 1;
#endif
      break;

    default:
      // Unknown action: programming error.
      ei_internal_assert(false);
  }
}
/** \returns the max number of threads reserved for Eigen
* \sa setNbThreads */
/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int result = 0;
  ei_manage_multi_threading(GetAction, &result);
  return result;
}
/** Sets the max number of threads reserved for Eigen
* \sa nbThreads */
/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  int count = v;
  ei_manage_multi_threading(SetAction, &count);
}
// Per-thread bookkeeping for the parallel GEMM kernel: a synchronization
// marker, a user counter, and the slice of the rhs assigned to the thread.
template<typename Index> struct GemmParallelInfo
{
  // Start in the idle state: no sync token (-1), no users, empty rhs slice.
  GemmParallelInfo()
  {
    sync = -1;
    users = 0;
    rhs_start = 0;
    rhs_length = 0;
  }

  int volatile sync;
  int volatile users;

  Index rhs_start;
  Index rhs_length;
};
/** \internal
  * Runs the product kernel \a func either sequentially or split across
  * OpenMP threads. \a func is invoked as func(row0, nrows, col0, ncols
  * [, info]); the optional per-thread \a info array carries the rhs slice
  * assigned to each thread plus synchronization state.
  */
template<bool Condition, typename Functor, typename Index>
void ei_parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose)
{
#ifndef EIGEN_HAS_OPENMP
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else
  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  //  - the max number of threads we can create is greater than 1
  //  - we are not already in a parallel code
  //  - the sizes are large enough

  // 1- are we already in a parallel session?
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  // The split dimension depends on the destination's storage order.
  Index size = transpose ? cols : rows;

  // 2- compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index max_threads = std::max<Index>(1,size / 32);

  // 3 - compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), max_threads);

  if(threads==1)
    return func(0,rows, 0,cols);

  func.initParallelSession();

  if(transpose)
    std::swap(rows,cols);

  // Round block sizes down to multiples of 4 (cols) and 8 (rows); the last
  // thread absorbs the remainder below.
  Index blockCols = (cols / threads) & ~Index(0x3);
  Index blockRows = (rows / threads) & ~Index(0x7);

  GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];

  #pragma omp parallel for schedule(static,1) num_threads(threads)
  for(Index i=0; i<threads; ++i)
  {
    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;

    // Record this thread's rhs slice so the kernel can coordinate packing.
    info[i].rhs_start = c0;
    info[i].rhs_length = actualBlockCols;

    if(transpose)
      func(0, cols, r0, actualBlockRows, info);
    else
      func(r0, actualBlockRows, 0,cols, info);
  }

  delete[] info;
#endif
}
#endif // EIGEN_PARALLELIZER_H
|
GB_subassign_13.c | //------------------------------------------------------------------------------
// GB_subassign_13: C(I,J)<!M> = scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 13: C(I,J)<!M> = scalar ; using S
// M: present
// Mask_comp: true
// C_replace: false
// accum: NULL
// A: scalar
// S: constructed
// C: not bitmap, but can be full since no zombies are inserted in that case
// M: not bitmap
#include "GB_subassign_methods.h"
// GB_subassign_13: computes C(I,J)<!M> = scalar using the symbolic pattern
// S = C(I,J). Phase 1 updates existing entries of C (creating zombies where
// needed) and counts how many new entries must be inserted; phase 2 inserts
// those entries as pending tuples. All control macros (GB_EMPTY_TASKLIST,
// GB_GET_*, GB_SUBASSIGN_IXJ_SLICE, the task wrapups, etc.) are defined in
// GB_subassign_methods.h.
GrB_Info GB_subassign_13
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;
    GB_GET_C ;      // C must not be bitmap
    const int64_t Cnvec = C->nvec ;
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    GB_GET_MASK ;
    GB_GET_SCALAR ;
    GB_GET_S ;
    // no accumulator in Method 13 (plain assignment)
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 13: C(I,J)<!M> = scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
    // required.  The sparsity of !M cannot be exploited.

    // Methods 13, 15, 17, and 19 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                // INT64_MAX is a sentinel for an exhausted list, so any
                // remaining iA always compares as the smallest index.
                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // assign the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // both S (i,j) and A (i,j) present
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): copy A, no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_C_S_LOOKUP ;
                            GB_noaccum_C_A_1_scalar ;
                        }
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // counted now; the actual insert happens in phase 2
                            task_pending++ ;
                        }
                    }
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
            //------------------------------------------------------------------

            // Same mask traversal as phase 1, but only the "insert" case acts:
            // entries already handled in phase 1 are skipped here.
            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // assign the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) present: already updated in phase 1
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
|
ark_heat1D_ompdev.c | /*---------------------------------------------------------------
* Programmer(s): Shelby Lockhart @ LLNL
*---------------------------------------------------------------
* Based on the serial example ark_heat1D.c developed by
* Daniel R. Reynolds and parallelized with OpenMP 4.5.
*---------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2022, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
*---------------------------------------------------------------
* Example problem:
*
* The following test simulates a simple 1D heat equation,
* u_t = k*u_xx + f
* for t in [0, 10], x in [0, 1], with initial conditions
* u(0,x) = 0
* Dirichlet boundary conditions, i.e.
* u_t(t,0) = u_t(t,1) = 0,
* and a point-source heating term,
* f = 1 for x=0.5.
*
* The spatial derivatives are computed using second-order
* centered differences, with the data distributed over N points
* on a uniform spatial grid.
*
* This program solves the problem with either an ERK or DIRK
* method. For the DIRK method, we use a Newton iteration with
* the SUNLinSol_PCG linear solver, and a user-supplied Jacobian-vector
* product routine.
*
* 100 outputs are printed at equal intervals, and run statistics
* are printed at the end.
*---------------------------------------------------------------*/
/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts. */
#include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */
#include <sunlinsol/sunlinsol_pcg.h> /* access to PCG SUNLinearSolver */
#include <sundials/sundials_types.h> /* defs. of realtype, sunindextype, etc */
#include <sundials/sundials_math.h> /* def. of SUNRsqrt, etc. */
#ifdef _OPENMP
#include <omp.h> /* OpenMP functions */
#endif
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif
/* user data structure */
typedef struct {
sunindextype N; /* number of intervals */
realtype dx; /* mesh spacing */
realtype k; /* diffusion coefficient */
} *UserData;
/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
N_Vector fy, void *user_data, N_Vector tmp);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/* Main Program */
/* Main program: sets up the 1D heat problem, integrates it with an
 * implicit ARKStep method (Newton + PCG, user-supplied J*v) on an
 * OpenMPDEV vector, writes the mesh and solution snapshots to disk,
 * and prints final solver statistics. Returns 0 on success, 1 on a
 * setup failure. */
int main() {
  /* general problem parameters */
  realtype T0 = RCONST(0.0); /* initial time */
  realtype Tf = RCONST(1.0); /* final time */
  int Nt = 10; /* total number of output times */
  realtype rtol = 1.e-6; /* relative tolerance */
  realtype atol = 1.e-10; /* absolute tolerance */
  UserData udata = NULL;
  realtype *data;
  sunindextype N = 201; /* spatial mesh size */
  realtype k = 0.5; /* heat conductivity */
  sunindextype i;
  /* general problem variables */
  int flag; /* reusable error-checking flag */
  N_Vector y = NULL; /* empty vector for storing solution */
  SUNLinearSolver LS = NULL; /* empty linear solver object */
  void *arkode_mem = NULL; /* empty ARKStep memory structure */
  FILE *FID, *UFID;
  realtype t, dTout, tout;
  int iout;
  long int nst, nst_a, nfe, nfi, nsetups, nli, nJv, nlcf, nni, ncfn, netf;
  /* Create the SUNDIALS context object for this simulation */
  SUNContext ctx;
  flag = SUNContext_Create(NULL, &ctx);
  if (check_flag(&flag, "SUNContext_Create", 1)) return 1;
  /* allocate and fill udata structure */
  udata = (UserData) malloc(sizeof(*udata));
  if (check_flag((void *) udata, "malloc", 2)) return 1; /* added: check allocation */
  udata->N = N;
  udata->k = k;
  udata->dx = RCONST(1.0)/(1.0*N-1.0); /* mesh spacing */
  /* Initial problem output */
  printf("\n1D Heat PDE test problem:\n");
  printf(" N = %li\n", (long int) udata->N);
  printf(" diffusion coefficient: k = %"GSYM"\n", udata->k);
  /* Initialize data structures */
  y = N_VNew_OpenMPDEV(N, ctx); /* Create OpenMPDEV vector for solution */
  if (check_flag((void *) y, "N_VNew_OpenMPDEV", 0)) return 1; /* fixed label: was "N_VNew_Serial" */
  N_VConst(0.0, y); /* Set initial conditions */
  /* Call ARKStepCreate to initialize the integrator memory and specify the
     right-hand side function in y'=f(t,y), the inital time T0, and
     the initial dependent variable vector y. Note: since this
     problem is fully implicit, we set f_E to NULL and f_I to f. */
  arkode_mem = ARKStepCreate(NULL, f, T0, y, ctx);
  if (check_flag((void *) arkode_mem, "ARKStepCreate", 0)) return 1;
  /* Set routines */
  flag = ARKStepSetUserData(arkode_mem, (void *) udata); /* Pass udata to user functions */
  if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
  flag = ARKStepSetMaxNumSteps(arkode_mem, 10000); /* Increase max num steps */
  if (check_flag(&flag, "ARKStepSetMaxNumSteps", 1)) return 1;
  flag = ARKStepSetPredictorMethod(arkode_mem, 1); /* Specify maximum-order predictor */
  if (check_flag(&flag, "ARKStepSetPredictorMethod", 1)) return 1;
  flag = ARKStepSStolerances(arkode_mem, rtol, atol); /* Specify tolerances */
  if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;
  /* Initialize PCG solver -- no preconditioning, with up to N iterations */
  LS = SUNLinSol_PCG(y, 0, N, ctx);
  if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1;
  /* Linear solver interface -- set user-supplied J*v routine (no 'jtsetup' required) */
  flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL); /* Attach linear solver to ARKStep */
  if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
  flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac); /* Set the Jacobian routine */
  if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1;
  /* Specify linearly implicit RHS, with non-time-dependent Jacobian */
  flag = ARKStepSetLinear(arkode_mem, 0);
  if (check_flag(&flag, "ARKStepSetLinear", 1)) return 1;
  /* output mesh to disk */
  FID=fopen("heat_mesh.txt","w");
  for (i=0; i<N; i++) fprintf(FID," %.16"ESYM"\n", udata->dx*i);
  fclose(FID);
  /* Open output stream for results, access data array */
  UFID=fopen("heat1D.txt","w");
  data = N_VGetHostArrayPointer_OpenMPDEV(y);
  N_VCopyFromDevice_OpenMPDEV(y); /* always copy back from device before printing */
  /* output initial condition to disk */
  for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM"", data[i]);
  fprintf(UFID,"\n");
  /* Main time-stepping loop: calls ARKStepEvolve to perform the integration, then
     prints results. Stops when the final time has been reached */
  t = T0;
  dTout = (Tf-T0)/Nt;
  tout = T0+dTout;
  printf(" t ||u||_rms\n");
  printf(" -------------------------\n");
  printf(" %10.6"FSYM" %10.6"FSYM"\n", t, SUNRsqrt(N_VDotProd(y,y)/N));
  for (iout=0; iout<Nt; iout++) {
    flag = ARKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL); /* call integrator */
    if (check_flag(&flag, "ARKStepEvolve", 1)) break; /* fixed label: was "ARKStep" */
    printf(" %10.6"FSYM" %10.6"FSYM"\n", t, SUNRsqrt(N_VDotProd(y,y)/N)); /* print solution stats */
    if (flag >= 0) { /* successful solve: update output time */
      tout += dTout;
      tout = (tout > Tf) ? Tf : tout;
    } else { /* unsuccessful solve: break */
      fprintf(stderr,"Solver failure, stopping integration\n");
      break;
    }
    N_VCopyFromDevice_OpenMPDEV(y); /* copy back from device before printing solution */
    /* output results to disk */
    for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM"", data[i]);
    fprintf(UFID,"\n");
  }
  printf(" -------------------------\n");
  fclose(UFID);
  /* Print some final statistics */
  flag = ARKStepGetNumSteps(arkode_mem, &nst);
  check_flag(&flag, "ARKStepGetNumSteps", 1);
  flag = ARKStepGetNumStepAttempts(arkode_mem, &nst_a);
  check_flag(&flag, "ARKStepGetNumStepAttempts", 1);
  flag = ARKStepGetNumRhsEvals(arkode_mem, &nfe, &nfi);
  check_flag(&flag, "ARKStepGetNumRhsEvals", 1);
  flag = ARKStepGetNumLinSolvSetups(arkode_mem, &nsetups);
  check_flag(&flag, "ARKStepGetNumLinSolvSetups", 1);
  flag = ARKStepGetNumErrTestFails(arkode_mem, &netf);
  check_flag(&flag, "ARKStepGetNumErrTestFails", 1);
  flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
  check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1);
  flag = ARKStepGetNumNonlinSolvConvFails(arkode_mem, &ncfn);
  check_flag(&flag, "ARKStepGetNumNonlinSolvConvFails", 1);
  flag = ARKStepGetNumLinIters(arkode_mem, &nli);
  check_flag(&flag, "ARKStepGetNumLinIters", 1);
  flag = ARKStepGetNumJtimesEvals(arkode_mem, &nJv);
  check_flag(&flag, "ARKStepGetNumJtimesEvals", 1);
  flag = ARKStepGetNumLinConvFails(arkode_mem, &nlcf);
  check_flag(&flag, "ARKStepGetNumLinConvFails", 1);
  printf("\nFinal Solver Statistics:\n");
  printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
  printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi);
  printf(" Total linear solver setups = %li\n", nsetups);
  printf(" Total linear iterations = %li\n", nli);
  printf(" Total number of Jacobian-vector products = %li\n", nJv);
  printf(" Total number of linear solver convergence failures = %li\n", nlcf);
  printf(" Total number of Newton iterations = %li\n", nni);
  printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn);
  printf(" Total number of error test failures = %li\n", netf);
  /* Clean up and return with successful completion */
  N_VDestroy(y); /* Free vectors */
  free(udata); /* Free user data */
  ARKStepFree(&arkode_mem); /* Free integrator memory */
  SUNLinSolFree(LS); /* Free linear solver */
  SUNContext_Free(&ctx); /* Free context */
  return 0;
}
/*--------------------------------
* Functions called by the solver
*--------------------------------*/
/* f routine to compute the ODE RHS function f(t,y). */
/* ODE right-hand side ydot = f(t,y) for the semi-discrete heat equation:
   k*u_xx via second-order centered differences, computed on the device,
   plus a point source at the middle node. Boundary entries of ydot are
   left at zero (homogeneous Dirichlet). Returns 0 on success, 1 if the
   device data pointers cannot be obtained. */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
UserData udata = (UserData) user_data; /* access problem data */
sunindextype N = udata->N; /* set variable shortcuts */
realtype k = udata->k;
realtype dx = udata->dx;
realtype *Y=NULL, *Ydot=NULL;
realtype c1, c2;
sunindextype i, isource;
int dev;
dev = omp_get_default_device();
Y = N_VGetDeviceArrayPointer_OpenMPDEV(y); /* access data arrays */
if (check_flag((void *) Y, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
Ydot = N_VGetDeviceArrayPointer_OpenMPDEV(ydot);
if (check_flag((void *) Ydot, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
N_VConst(0.0, ydot); /* Initialize ydot to zero */
/* iterate over domain, computing all equations */
c1 = k/dx/dx; /* off-diagonal stencil coefficient k/dx^2 */
c2 = -RCONST(2.0)*k/dx/dx; /* diagonal stencil coefficient -2k/dx^2 */
isource = N/2; /* index of the point-source node */
/* interior stencil on the device; Y and Ydot are device pointers */
#pragma omp target map(to:c1,c2,isource,N,dx) is_device_ptr(Ydot,Y) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i=1; i<N-1; i++)
Ydot[i] = c1*Y[i-1] + c2*Y[i] + c1*Y[i+1];
/* single device task: add the heating term at the source node */
#pragma omp target is_device_ptr(Ydot) device(dev)
{
Ydot[isource] += 0.01/dx; /* source term */
}
return 0; /* Return with success */
}
/* Jacobian routine to compute J(t,y) = df/dy. */
/* Jacobian-vector product Jv = J*v for the heat stencil. J is the constant
   tridiagonal centered-difference Laplacian scaled by k, so the product
   applies the same stencil coefficients as f(). Boundary entries of Jv
   stay zero. Returns 0 on success, 1 if device pointers are unavailable. */
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
N_Vector fy, void *user_data, N_Vector tmp)
{
UserData udata = (UserData) user_data; /* variable shortcuts */
sunindextype N = udata->N;
realtype k = udata->k;
realtype dx = udata->dx;
realtype *V=NULL, *JV=NULL;
realtype c1, c2;
sunindextype i;
int dev;
dev = omp_get_default_device();
V = N_VGetDeviceArrayPointer_OpenMPDEV(v); /* access data arrays */
if (check_flag((void *) V, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
JV = N_VGetDeviceArrayPointer_OpenMPDEV(Jv);
if (check_flag((void *) JV, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
N_VConst(0.0, Jv); /* initialize Jv product to zero */
/* iterate over domain, computing all Jacobian-vector products */
c1 = k/dx/dx; /* off-diagonal stencil coefficient k/dx^2 */
c2 = -RCONST(2.0)*k/dx/dx; /* diagonal stencil coefficient -2k/dx^2 */
/* interior stencil product on the device; V and JV are device pointers */
#pragma omp target map(to:c1,c2,N) is_device_ptr(JV,V) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i=1; i<N-1; i++)
JV[i] = c1*V[i-1] + c2*V[i] + c1*V[i+1];
return 0; /* Return with success */
}
/*-------------------------------
* Private helper functions
*-------------------------------*/
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer
*/
/* Check a function return value and report failures to stderr.
   opt == 0: flagvalue is a pointer from a SUNDIALS allocator; NULL is an error.
   opt == 1: flagvalue points to an int flag; a negative flag is an error.
   opt == 2: flagvalue is a pointer from a plain allocator; NULL is an error.
   Returns 1 on error, 0 otherwise. */
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
  const int *errflag;
  switch (opt) {
  case 0:
    /* SUNDIALS allocator: NULL means no memory was allocated */
    if (flagvalue != NULL) break;
    fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return 1;
  case 1:
    /* flag-returning call: negative values signal failure */
    errflag = (const int *) flagvalue;
    if (*errflag >= 0) break;
    fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
            funcname, *errflag);
    return 1;
  case 2:
    /* generic allocator: NULL means no memory was allocated */
    if (flagvalue != NULL) break;
    fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return 1;
  default:
    break;
  }
  return 0;
}
/*---- end of file ----*/
|
cpu_ctc.h | #pragma once
#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>
#include <dmlc/omp.h>
#include "ctc_helper.h"
/* CPU implementation of the Connectionist Temporal Classification (CTC)
   loss and gradient. All scratch memory is carved out of the caller-supplied
   workspace buffer; the class itself never allocates. */
template<typename ProbT>
class CpuCTC {
public:
// Noncopyable
CpuCTC(int alphabet_size, int minibatch, void* workspace,
int blank_label) :
alphabet_size_(alphabet_size), minibatch_(minibatch),
workspace_(workspace), blank_label_(blank_label) {
};
CpuCTC(const CpuCTC&) = delete;
CpuCTC& operator=(const CpuCTC&) = delete;
/* Per-utterance negative log-likelihoods (costs) and gradients w.r.t. the
   activations for the whole minibatch. */
ctcStatus_t cost_and_grad(const ProbT* const activations,
ProbT *grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
/* Forward-only scoring: fills costs without computing gradients. */
ctcStatus_t score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
private:
/* Per-utterance scratch views carved out of the shared workspace. */
class CpuCTC_metadata {
private:
int setup_labels(const int* const labels, int blank_label, int L, int S);
public:
CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
void* workspace, size_t bytes_used, int blank_label,
const int* const labels);
ProbT* alphas; // forward log-probabilities, S*T entries
ProbT* betas; // one backward column, S entries
int* labels_w_blanks; // labels with blanks interleaved, S = 2L+1 entries
int* e_inc; // per-step increments of the active window end
int* s_inc; // per-step increments of the active window start
ProbT* output; // per-character log-prob accumulator, alphabet_size entries
int repeats; // number of adjacent equal labels in the transcript
};
int alphabet_size_; // Number of characters plus blank
int minibatch_;
void* workspace_; // caller-owned scratch buffer
int blank_label_;
/* Numerically stable column-wise softmax of activations into probs. */
void softmax(const ProbT* const activations, ProbT* probs,
const int* const input_lengths);
/* Cost and gradient for one minibatch entry; the bool flags a
   forward/backward log-likelihood mismatch above threshold. */
std::tuple<ProbT, bool>
cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels, int T, int L,
int mb, size_t bytes_used);
ProbT compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas);
ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output);
};
/* Carve the per-utterance scratch buffers out of `workspace`, starting at
   byte offset `bytes_used`, then build the blank-augmented label sequence.
   The carving order and sizes here must mirror the per-minibatch byte
   accounting done by cost_and_grad()/score_forward(). */
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
int alphabet_size,
void* workspace, size_t bytes_used,
int blank_label,
const int* const labels) {
/* alphas: S*T log-probabilities, initialized to -inf */
alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S * T;
std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());
/* betas: a single column of S log-probabilities, initialized to -inf */
betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S;
std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());
labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * alphabet_size;
repeats = setup_labels(labels, blank_label, L, S);
}
/* Build the blank-interleaved label sequence (blank, l1, blank, l2, ...,
   blank) and the start/end window increment tables used by the alpha/beta
   recursions. A repeated label forces the alignment path through the
   separating blank, so the window advances by two single steps there
   instead of one double step. Returns the number of adjacent repeated
   labels in the transcript. */
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
int blank_label, int L, int S) {
int e_counter = 0;
int s_counter = 0;
s_inc[s_counter++] = 1;
int repeats = 0;
for (int i = 1; i < L; ++i) {
if (labels[i-1] == labels[i]) {
s_inc[s_counter++] = 1;
s_inc[s_counter++] = 1;
e_inc[e_counter++] = 1;
e_inc[e_counter++] = 1;
++repeats;
}
else {
s_inc[s_counter++] = 2;
e_inc[e_counter++] = 2;
}
}
e_inc[e_counter++] = 1;
/* interleave: even slots are blanks, odd slots the labels */
for (int i = 0; i < L; ++i) {
labels_w_blanks[2 * i] = blank_label;
labels_w_blanks[2 * i + 1] = labels[i];
}
labels_w_blanks[S - 1] = blank_label; /* trailing blank */
return repeats;
}
/* Numerically stable softmax over the alphabet dimension, applied column by
 * column (one column per time step per utterance). Minibatch entries are
 * processed in parallel; columns beyond input_lengths[mb] are untouched. */
template<typename ProbT>
void
CpuCTC<ProbT>::softmax(const ProbT* const activations, ProbT* probs,
                       const int* const input_lengths) {
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        for (int t = 0; t < input_lengths[mb]; ++t) {
            const int offset = (mb + minibatch_ * t) * alphabet_size_;
            const ProbT* col_in = activations + offset;
            ProbT* col_out = probs + offset;

            /* shift by the column max so exp() cannot overflow */
            ProbT col_max = -std::numeric_limits<ProbT>::infinity();
            for (int r = 0; r < alphabet_size_; ++r)
                col_max = std::max(col_max, col_in[r]);

            ProbT total = ProbT(0.);
            for (int r = 0; r < alphabet_size_; ++r) {
                col_out[r] = std::exp(col_in[r] - col_max);
                total += col_out[r];
            }

            for (int r = 0; r < alphabet_size_; ++r)
                col_out[r] /= total;
        }
    }
}
/* Cost and gradient for one utterance: run the forward (alpha) and backward
   (beta) passes over the blank-augmented label sequence and return
   {-log-likelihood, over_threshold}, where over_threshold reports a
   forward/backward log-likelihood mismatch beyond ctc_helper::threshold.
   Infeasible transcripts (L + repeats > T) yield a zero cost. */
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels,
int T, int L, int mb, size_t bytes_used) {
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, blank_label_, labels);
bool over_threshold = false;
/* transcript cannot fit in T frames: no valid alignment exists */
if (L + ctcm.repeats > T) {
return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
}
ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T, ctcm.e_inc,
ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
ProbT llBackward = compute_betas_and_grad(grad, probs, llForward, ctcm.repeats,
S, T, ctcm.e_inc, ctcm.s_inc,
ctcm.labels_w_blanks,
ctcm.alphas,
ctcm.betas,
ctcm.output);
/* forward and backward log-likelihoods should agree up to rounding */
ProbT diff = std::abs(llForward - llBackward);
if (diff > ctc_helper::threshold) {
over_threshold = true;
}
return std::make_tuple(-llForward, over_threshold);
}
// Computes forward (alpha) log-probabilities over the blank-augmented label
// sequence, restricting work to the window [start, end) of states that can
// still reach a final state in the remaining frames. Returns the forward
// log-likelihood of the transcript.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas) {
/* at t=0 only the leading blank (and first label, if present) are reachable */
int start = (((S /2) + repeats - T) < 0) ? 0 : 1,
end = S > 1 ? 2 : 1;
for (int i = start; i < end; ++i) {
alphas[i] = std::log(probs[labels[i]]);
}
for(int t = 1; t < T; ++t) {
/* advance the active window as frames are consumed */
int remain = (S / 2) + repeats - (T - t);
if(remain >= 0)
start += s_inc[remain];
if(t <= (S / 2) + repeats)
end += e_inc[t - 1];
int startloop = start;
int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);
if (start == 0) {
/* state 0 (leading blank) can only be entered from itself */
alphas[idx1] = alphas[idx2] + std::log(probs[blank_label_ + idx3]);
startloop += 1;
}
for(int i = startloop; i < end; ++i) {
ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != 1 && labels[i] != labels[i-2])
prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);
alphas[i + idx1] = prev_sum + std::log(probs[labels[i] + idx3]);
}
}
/* total log-likelihood: log-sum over states active at the last frame */
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
}
return loglike;
}
// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
/* Backward (beta) pass fused with the gradient computation: sweeps from the
   last frame to the first using a single beta column, combines it with the
   stored alphas to accumulate per time step the log-probability mass
   assigned to each character (a sequential reduce-by-key into `output`),
   and converts that into the gradient w.r.t. the unnormalized activations.
   Assumes `grad` was zeroed by the caller. Returns the backward
   log-likelihood, which should match log_partition up to rounding. */
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output) {
int start = S > 1 ? (S - 2) : 0,
end = (T > (S / 2) + repeats) ? S : S-1;
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
//set the starting values in the beta column at the very right edge
for (int i = start; i < end; ++i) {
betas[i] = std::log(probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)]);
//compute alpha * beta in log space at this position in (S, T) space
alphas[i + (T - 1) * S] += betas[i];
//update the gradient associated with this label
//essentially performing a reduce-by-key in a sequential manner
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
}
//update the gradient wrt to each unique label
for (int i = 0; i < alphabet_size_; ++i) {
int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
}
//loop from the second to last column all the way to the left
for(int t = T - 2; t >= 0; --t) {
/* shrink the active window symmetrically to the forward pass */
int remain = (S / 2) + repeats - (T - t);
if(remain >= -1)
start -= s_inc[remain + 1];
if(t < (S / 2) + repeats)
end -= e_inc[t];
int endloop = end == S ? end - 1 : end;
int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
for(int i = start; i < endloop; ++i) {
ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != (S-2) && labels[i] != labels[i+2]){
next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
}
betas[i] = next_sum + std::log(probs[labels[i] + idx3]);
//compute alpha * beta in log space
alphas[i + idx1] += betas[i];
//update the gradient associated with this label
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
}
/* last state (trailing blank) handled separately: no i+1 neighbour */
if (end == S) {
betas[(S-1)] = betas[(S-1)] + std::log(probs[blank_label_ + idx3]);
alphas[(S-1) + idx1] += betas[(S-1)];
output[labels[S-1]] =
ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
}
//go over the unique labels and compute the final grad
// wrt to each one at this time step
for (int i = 0; i < alphabet_size_; ++i) {
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
++idx3;
}
}
/* backward log-likelihood: log-sum over states active at the first frame */
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
}
return loglike;
}
/* Compute the CTC cost and gradient for every utterance in the minibatch.
 *
 * activations:  unnormalized logits laid out [T][minibatch][alphabet]
 *               (softmax is applied internally into the workspace).
 * grads:        output gradients w.r.t. the activations.
 * costs:        output per-utterance negative log-likelihoods.
 * flat_labels:  all transcripts concatenated; label_lengths gives the split.
 * Returns CTC_STATUS_INVALID_VALUE on any null argument, else
 * CTC_STATUS_SUCCESS. */
template<typename ProbT>
ctcStatus_t
CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
                             ProbT *grads,
                             ProbT *costs,
                             const int* const flat_labels,
                             const int* const label_lengths,
                             const int* const input_lengths) {
    if (activations == nullptr ||
        grads == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
       )
        return CTC_STATUS_INVALID_VALUE;

    /* softmax output lives at the front of the workspace */
    ProbT* probs = static_cast<ProbT *>(workspace_);

    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;

    /* per-minibatch scratch stride; must mirror the carving order in the
       CpuCTC_metadata constructor */
    size_t per_minibatch_bytes = 0;

    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_); /* fixed stray ';;' */
    int maxS = 2 * maxL + 1;

    /* NOTE(review): the strides below use sizeof(float) although the buffers
       hold ProbT; kept as-is to stay consistent with the external
       workspace-size computation, but this under-sizes the stride if ProbT
       is double -- verify against the workspace allocator. */
    //output
    per_minibatch_bytes += sizeof(float) * alphabet_size_;
    //alphas
    per_minibatch_bytes += sizeof(float) * maxS * maxT;
    //betas
    per_minibatch_bytes += sizeof(float) * maxS;
    //labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;

    softmax(activations, probs, input_lengths);

#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        bool mb_status;

        std::tie(costs[mb], mb_status) =
            cost_and_grad_kernel(grads + mb * alphabet_size_,
                                 probs + mb * alphabet_size_,
                                 flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
                                 T, L, mb,
                                 bytes_used + mb * per_minibatch_bytes);
    }

    return CTC_STATUS_SUCCESS;
}
/* Forward-only scoring: fill costs[] with the negative forward
   log-likelihood of each utterance; no gradients are produced. The
   workspace accounting mirrors cost_and_grad(). Returns
   CTC_STATUS_INVALID_VALUE on any null argument, else CTC_STATUS_SUCCESS. */
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
if (activations == nullptr ||
costs == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr
)
return CTC_STATUS_INVALID_VALUE;
/* softmax output lives at the front of the workspace */
ProbT* probs = static_cast<ProbT *>(workspace_);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
//per minibatch memory
size_t per_minibatch_bytes = 0;
int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
int maxS = 2 * maxL + 1;
/* NOTE(review): strides use sizeof(float) though the buffers hold ProbT --
   matches cost_and_grad, but verify against the workspace allocator. */
//output
per_minibatch_bytes += sizeof(float) * alphabet_size_;
//alphas
per_minibatch_bytes += sizeof(float) * maxS * maxT;
//betas
per_minibatch_bytes += sizeof(float) * maxS;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * maxS;
softmax(activations, probs, input_lengths);
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int L = label_lengths[mb]; // Number of labels in transcription
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_,
bytes_used + mb * per_minibatch_bytes, blank_label_,
flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));
/* transcript cannot fit in T frames: no valid alignment exists */
if (L + ctcm.repeats > T)
costs[mb] = ProbT(0);
else {
costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T,
ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
}
}
return CTC_STATUS_SUCCESS;
}
|
17_omp_sharing_semantics.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | FileCheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | FileCheck %s --check-prefix=check-inst
// REQUIRES: openmp
// clang-format on
#include "omp.h"
extern void MPI_Send(void*, int);
/* Scenario: firstprivate. Each thread receives a private copy of d in the
   outlined region, so instrumentation is expected only on that copy
   (d.addr), not on d in foo itself; shared e is filtered. */
void foo(int count) {
// firstprivate > every thread has a private copy (d.addr) of d (which is passed to outlined region for copy)
// check-inst: define {{.*}} @foo
// check-inst-NOT: call void @__typeart_alloc_stack
int d = 3;
int e = 4;
// check-inst: define {{.*}} @.omp_outlined
// check-inst: %d.addr = alloca i64, align 8
// check-inst-NEXT: %0 = bitcast i64* %d.addr to i8*
// check-inst-NEXT: call void @__typeart_alloc_stack_omp(i8* %0, i32 3, i64 1)
#pragma omp parallel for schedule(dynamic, 1) firstprivate(d) shared(e)
for (int i = 0; i < count; ++i) {
// Analysis should not filter d, but e...
MPI_Send((void*)&d, e);
}
}
/* Scenario: lastprivate without any use of d after the region. Only the
   outlined region's private copy (%d4) is expected to be instrumented;
   the original d in bar is not tracked. */
void bar(int count) {
// lastprivate - value of d is copied to "private_val" (which is tracked) in outlined region and thus not tracked.
// --> see "void bar2()" for different scenario with tracking of "d"
// check-inst: define {{.*}} @bar
// check-inst-NOT: call void @__typeart_alloc_stack
int d = 3;
int e = 4;
// check-inst: define {{.*}} @.omp_outlined
// check-inst: %d4 = alloca i32
// check-inst-NEXT: %0 = bitcast i32* %d4 to i8*
// check-inst-NEXT: call void @__typeart_alloc_stack_omp(i8* %0, i32 2, i64 1)
#pragma omp parallel for schedule(dynamic, 1) lastprivate(d) shared(e)
for (int i = 0; i < count; ++i) {
// Analysis should not filter d, but e...
MPI_Send((void*)&d, e);
}
}
/* Scenario: lastprivate with d also used after the region (the trailing
   MPI_Send). Both d in bar2 itself and the outlined private copy are
   expected to be instrumented. */
void bar2(int count) {
// check-inst: define {{.*}} @bar2
// check-inst: %d = alloca
// check-inst-NEXT: %0 = bitcast i32* %d to i8*
// check-inst-NEXT: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
int d = 3;
int e = 4;
// check-inst: define {{.*}} @.omp_outlined
// check-inst: %d4 = alloca i32
// check-inst-NEXT: %0 = bitcast i32* %d4 to i8*
// check-inst-NEXT: call void @__typeart_alloc_stack_omp(i8* %0, i32 2, i64 1)
#pragma omp parallel for schedule(dynamic, 1) lastprivate(d) shared(e)
for (int i = 0; i < count; ++i) {
// Analysis should not filter d, but e...
MPI_Send((void*)&d, e);
}
MPI_Send((void*)&d, e);
}
/* Scenario: private. The outlined region declares its own uninitialized
   d and e; the outer d and e are never passed in, so only the outlined
   copies are expected to be instrumented. */
void foo_bar(int count) {
// private: d, e are "randomly" initialised values inside outlined region (outer d,e are not passed)
// check-inst: define {{.*}} @foo_bar
// check-inst-NOT: call void @__typeart_alloc_stack
int d = 3;
int e = 4;
// check-inst: define {{.*}} @.omp_outlined
// check-inst: %d = alloca
// check-inst-NEXT: %0 = bitcast i32* %d to i8*
// check-inst-NEXT: call void @__typeart_alloc_stack_omp(i8* %0, i32 2, i64 1)
#pragma omp parallel for schedule(dynamic, 1) private(d, e)
for (int i = 0; i < count; ++i) {
MPI_Send((void*)&d, e);
}
}
// CHECK: TypeArtPass [Heap & Stack]
// CHECK-NEXT: Malloc : 0
// CHECK-NEXT: Free : 0
// CHECK-NEXT: Alloca : 5
// CHECK-NEXT: Global : 0
|
8389.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the deterministic pattern A[i][j] = (i + j) / nj. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;
  for (row = 0; row < ni; row++) {
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr (DCE guard / correctness check), inserting a newline
   every 20th value. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int r, c;
  for (r = 0; r < ni; r++) {
    for (c = 0; c < nj; c++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
      if ((r * NJ + c) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 2D 3x3 convolution of A into B, offloaded via OpenMP target: every
   interior point of B receives a fixed-coefficient weighted sum of its
   3x3 neighbourhood in A. NOTE(review): the one-cell boundary of B is
   never written here, yet print_array later reads all of B -- confirm
   the benchmark intends to print those indeterminate values. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
/* j is declared outside the construct, so it must be listed private */
#pragma omp target teams distribute parallel for simd num_threads(4) private(j)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
// Driver: allocates the NI x NJ input/output arrays, initializes A, times the
// 2-D convolution kernel, and routes B through polybench_prevent_dce so the
// computation cannot be optimized away. argc/argv are unused by this benchmark.
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
kernel.h | //
// Notes:
//
// 1) strategy: one thread per node in the 2D block;
// after initialisation it marches in the k-direction
// working with 3 planes of data at a time
//
// 2) each thread also loads in data for at most one halo node;
// assumes the number of halo nodes is not more than the
// number of interior nodes
//
// 3) corner halo nodes are included because they are needed
// for more general applications with cross-derivatives
//
// 4) could try double-buffering in the future fairly easily
//
// define kernel block size
#define BLOCK_X 32
#define BLOCK_Y 8
#define IOFF 1
#define JOFF (BLOCK_X+2)
#define KOFF (BLOCK_X+2)*(BLOCK_Y+2)
#define INDEX(i,j,j_off) (i + (j) * (j_off))
// device code
// One Jacobi sweep of the 3D Laplace equation on an NX x NY x NZ grid stored
// contiguously in u1 (i fastest, then j, then k). Boundary nodes keep their
// Dirichlet values; each interior node becomes the average of its six
// axis-neighbours. `pitch` is part of the historical interface and unused
// here. Reads u1, writes u2; the buffers must not alias (restrict).
void laplace3d(
    int NX, int NY, int NZ, int pitch,
    const float *__restrict u1,
    float *__restrict u2)
{
#pragma omp target teams distribute parallel for collapse(3) thread_limit(BLOCK_X*BLOCK_Y)
  for (int kz = 0; kz < NZ; kz++) {
    for (int jy = 0; jy < NY; jy++) {
      for (int ix = 0; ix < NX; ix++) { // innermost i: unit-stride access
        const int idx = ix + jy * NX + kz * NX * NY;
        const int on_boundary =
            ix == 0 || ix == NX - 1 ||
            jy == 0 || jy == NY - 1 ||
            kz == 0 || kz == NZ - 1;
        if (on_boundary) {
          u2[idx] = u1[idx]; // Dirichlet b.c.'s
        } else {
          const float nbr_sum = u1[idx - 1]       + u1[idx + 1]
                              + u1[idx - NX]      + u1[idx + NX]
                              + u1[idx - NX * NY] + u1[idx + NX * NY];
          u2[idx] = nbr_sum * 1.0f / 6.0f;
        }
      }
    }
  }
}
|
distribute_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd foo
// Bare directive: a following for-loop is accepted, any other statement is
// diagnosed. (Line layout is load-bearing for the expected-*@+N offsets.)
void test_no_clause() {
  int i;
#pragma omp distribute parallel for simd
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}}
#pragma omp distribute parallel for simd
  ++i;
}
// Branches may not cross the OpenMP region boundary: goto/return out of the
// loop body are errors, a forward goto to a label inside the region is fine,
// and labels inside the region are invisible from outside it.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown tokens after the directive name are warned about and skipped.
void test_invalid_clause() {
  int i;
#pragma omp target
#pragma omp teams
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
// Stray punctuation (';', ',') after the directive or a clause is diagnosed
// as extra tokens and ignored; parsing of the loop still proceeds.
void test_non_identifiers() {
  int i, x;
#pragma omp target
#pragma omp teams
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd;
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd linear(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd private(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Exhaustive malformed-argument matrix for the 'safelen' clause: missing or
// unbalanced parentheses, empty/comma-only argument lists, multiple arguments,
// non-constant and non-positive expressions. Only safelen(4) parses cleanly.
void test_safelen() {
  int i;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd safelen()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// Same malformed-argument matrix as test_safelen, applied to the 'simdlen'
// clause: parenthesization errors, empty/multiple arguments, non-constant and
// non-positive expressions. Only simdlen(4) parses cleanly.
void test_simdlen() {
  int i;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// 'collapse' clause checks: the safelen-style malformed-argument matrix, the
// requirement of N perfectly nested for-loops for collapse(N), and a nested
// OpenMP construct inside the resulting simd region being rejected.
void test_collapse() {
  int i;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
  // expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(2)
  for (i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
  // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp distribute parallel for simd reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}
// 'linear' clause checks: malformed argument lists, undeclared variables,
// the 'var : step' form (including a zero-step warning), and mutual-exclusion
// conflicts with linear/private/lastprivate on the same variable.
void test_linear() {
  int i;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd linear(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd linear(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{use of undeclared identifier 'x'}}
  // expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd linear(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+3 {{use of undeclared identifier 'x'}}
  // expected-error@+2 {{use of undeclared identifier 'y'}}
  // expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd linear(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int x, y;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(x :)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd linear(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd linear(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-note@+2 {{defined as linear}}
  // expected-error@+1 {{linear variable cannot be linear}}
#pragma omp distribute parallel for simd linear(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-note@+2 {{defined as private}}
  // expected-error@+1 {{private variable cannot be linear}}
#pragma omp distribute parallel for simd private(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-note@+2 {{defined as linear}}
  // expected-error@+1 {{linear variable cannot be private}}
#pragma omp distribute parallel for simd linear(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp distribute parallel for simd linear(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-note@+2 {{defined as linear}}
  // expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp distribute parallel for simd linear(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-note@+2 {{defined as lastprivate}}
  // expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp distribute parallel for simd lastprivate(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
}
// 'aligned' clause checks: malformed argument lists, undeclared names, the
// 'var : alignment' form, the array-or-pointer type requirement, and a
// variable appearing in more than one aligned clause.
void test_aligned() {
  int i;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd aligned(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{use of undeclared identifier 'x'}}
  // expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+3 {{use of undeclared identifier 'x'}}
  // expected-error@+2 {{use of undeclared identifier 'y'}}
  // expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(z)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(x :)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-note@+2 {{defined as aligned}}
  // expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp distribute parallel for simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-note@+3 {{defined as aligned}}
  // expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
  // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// 'private' clause checks: malformed argument lists, non-variable arguments,
// and valid one/two/three-variable forms (including use in the loop body).
void test_private() {
  int i;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// 'lastprivate' clause checks: malformed argument lists, non-variable
// arguments, and valid one/two/three-variable forms.
void test_lastprivate() {
  int i;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// 'firstprivate' clause checks: malformed argument lists, non-variable
// arguments, and valid combinations with 'lastprivate' on the same variables.
void test_firstprivate() {
  int i;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
  // expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// The canonical loop form requires an integer or pointer induction variable;
// float and double loop counters are rejected.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp target
#pragma omp teams
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
|
GB_binop__band_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__band_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__band_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__band_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__band_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__band_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_int8)
// C=scalar+B GB (_bind1st__band_int8)
// C=scalar+B' GB (_bind1st_tran__band_int8)
// C=A+scalar GB (_bind2nd__band_int8)
// C=A'+scalar GB (_bind2nd_tran__band_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT8 || GxB_NO_BAND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C += A+B where all three matrices are dense.  Disabled (#if 0) for this
// operator: per the comment below, only MIN/MAX/PLUS/MINUS/RMINUS/TIMES/
// DIV/RDIV get this kernel, so BAND falls back to the generic case and the
// code generator emitted the placeholder name "(none)".
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// Returns GrB_NO_VALUE when this operator/type pair is compiled out
// (GB_DISABLE), which tells the caller to use the generic kernel instead.
GrB_Info GB (_Cdense_ewise3_noaccum__band_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// numeric work is done by the shared template, specialized via the
// GB_GETA/GB_GETB/GB_BINOP macros defined at the top of this file
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, where C is dense and B is sparse; B is sliced into
// B_ntasks tasks (B_ek_slicing) executed on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__band_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a single scalar (passed type-erased via p_bwork)
// into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__band_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the return above always fires; this duplicate is emitted
// by the code generator and kept so the file matches the generated form
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with a diagonal matrix D.  Disabled (#if 0) for
// this operator; the generic colscale kernel is used instead, hence the
// generator's placeholder name "(none)".
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with a diagonal matrix D.  Disabled (#if 0) for this
// operator; the generic rowscale kernel is used instead.
// Fix: the placeholder name read "(node)"; every other disabled kernel in
// this file (and the generator's convention) uses "(none)", so it is
// corrected here for consistency.  The code is excluded from compilation
// either way.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with cij = aij & bij wherever both
// entries are present.  Task scheduling is precomputed by the caller
// (TaskList, C_ntasks, C_nthreads); per-matrix slicings are allocated as
// workspace here and freed by GB_FREE_WORK.
GrB_Info GB (_AaddB__band_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slicings used by the template below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, cij = aij & bij at entries present
// in both A and B.
GrB_Info GB (_AemultB_01__band_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// For BAND, GB_BINOP_FLIP is 0 (the operator is commutative), so only the
// unflipped #else branch below is compiled.
GrB_Info GB (_AemultB_02__band_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; work is sliced over M (M_ek_slicing).
GrB_Info GB (_AemultB_03__band_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__band_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = x & Bx [k]: apply the BAND operator with the scalar bound to
// the first argument.  Bb is B's bitmap (may select a subset of the anz
// positions); Cx and Bx may alias, which is safe since each position is
// read then written independently.
GrB_Info GB (_bind1st__band_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t x = (*((int8_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only positions present in B's bitmap are computed
if (GBB (Bb, k))
{
Cx [k] = (x) & (Bx [k]) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = Ax [k] & y: apply the BAND operator with the scalar bound to
// the second argument.  Ab is A's bitmap; Cx and Ax may alias.
GrB_Info GB (_bind2nd__band_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only positions present in A's bitmap are computed
if (GBB (Ab, k))
{
Cx [k] = (Ax [k]) & (y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar as
// the first argument.
GrB_Info GB (_bind1st_tran__band_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar as
// the second argument.
GrB_Info GB (_bind2nd_tran__band_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ejercicio6.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * Fills a[i] = i for i in [0, n) and sums the array in parallel with an
 * OpenMP reduction, starting from an initial value of 10.  n comes from
 * argv[1], clamped to the array capacity.
 *
 * Fixes: guard against negative n from atoi (loops already skip, but the
 * clamp message lacked a newline), use EXIT_FAILURE instead of exit(-1)
 * (which yields an implementation-defined status), and return 0 explicitly.
 */
int main(int argc, char **argv){
  enum { MAX_N = 20 };                 /* capacity of a[] */
  int i, n, a[MAX_N], suma = 10;       /* suma's initial value is part of the demo */
  if (argc < 2) {
    fprintf(stderr, "Falta iteraciones\n");
    exit(EXIT_FAILURE);
  }
  n = atoi(argv[1]);
  if (n > MAX_N) { n = MAX_N; printf("n=%d\n", n); }
  if (n < 0) n = 0;                    /* atoi may return a negative value */
  for (i = 0; i < n; i++) a[i] = i;
  /* each thread gets a private copy of suma, initialized to the identity
     of '+'; the copies are combined into suma at the end of the loop */
  #pragma omp parallel for reduction(+:suma)
  for (i = 0; i < n; i++) suma += a[i];
  printf("Tras 'parallel' suma=%d\n", suma);
  return 0;
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
// Row-major destination: compute C^T = B^T * A^T with the column-major
// kernel instead of implementing a separate row-major path.  Note the
// swapped scalar/storage-order/conjugation template arguments and the
// swapped (cols,rows) and (rhs,lhs) run() arguments below.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
// Column-major destination: blocked GEMM (Goto's algorithm).  The depth
// dimension is cut into kc-sized slabs; lhs panels are packed to blockA and
// rhs panels to blockB, then the gebp micro-kernel accumulates res += alpha
// * lhs * rhs block by block.  When 'info' is non-null and OpenMP is
// enabled, the packing of the lhs is cooperatively shared between threads.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
// blockA is shared between all threads (allocated by the caller);
// blockB is private to this thread (stack/heap via the macro below)
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
// NOTE(review): busy-wait on a plain (non-atomic) field; correctness
// relies on how GemmParallelInfo's fields are declared/updated
// elsewhere — confirm before modifying this synchronization.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(Index shift=0; shift<threads; ++shift)
{
Index i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#pragma omp atomic
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
// if the whole rhs fits in one nc x kc block, pack it only once
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// Callable wrapper binding operands, destination, alpha, and the blocking
// object so the GEMM kernel can be invoked (possibly per-thread, on a row
// range) by the parallelization layer.  Holds references only: the
// referenced lhs/rhs/dest/blocking must outlive the functor.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
// Prepare the blocking object for a parallel run and allocate the
// shared lhs buffer (blockA) used by all threads.
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
// Run the kernel on the [row, row+rows) x [col, col+cols) part of the
// destination; cols==-1 means "all remaining columns".
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
// Base class holding the packing buffers (blockA/blockB) and the cache
// blocking sizes mc/nc/kc shared by all level-3 (matrix-matrix) products.
// Buffers start null; allocation is handled by the derived
// gemm_blocking_space classes.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
Index m_mc;
Index m_nc;
Index m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
};
// Blocking for matrices whose maximum sizes are known at compile time:
// the packing buffers are fixed-size members (no heap allocation).  For a
// row-major destination the roles of lhs/rhs (and rows/cols) are swapped,
// hence the Transpose logic below.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
// over-allocate raw bytes and align the pointers manually in the ctor
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
this->m_blockA = reinterpret_cast<LhsScalar*>((std::size_t(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((std::size_t(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
// nothing to do: buffers are members, never heap-allocated
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
// Blocking for dynamically-sized matrices: blocking sizes are computed at
// run time and the packing buffers are lazily heap-allocated (allocateA/
// allocateB) and freed in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
// only kc/mc are constrained; a copy of nc is passed so the member
// m_nc keeps its full value
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Recompute blocking sizes for a parallel run; must be called before the
// buffers are allocated.
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Lazy allocation: each buffer is allocated at most once.
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
// High-level entry for "large" dense*dense products: routes small products
// (rows+cols+depth < 20) to the lazy coefficient-based path, and everything
// else through scaleAndAddTo, which sets up blocking and dispatches to the
// (possibly parallel) blocked GEMM kernel.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
// dst = lhs * rhs
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::evalTo(dst, lhs, rhs);
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
// dst += lhs * rhs
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::addTo(dst, lhs, rhs);
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
// dst -= lhs * rhs
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::subTo(dst, lhs, rhs);
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
// dst += alpha * lhs * rhs — the full GEMM path
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
// strip transposes/conjugates/scalar multiples from the operands and
// fold their scalar factors into alpha
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
rose_functionCall.c | //! Contributed by Jeff Keasler
#include "omp.h"
typedef double real8;
extern void OtherFunc(int k,real8 *l,real8 *m,real8 *n,real8 *o,real8 *p,real8 q,real8 r,real8 s[3]);
/*
 * For each i in [istart, iend), calls OtherFunc to fill the 3-vector s,
 * then copies s into c[3*i .. 3*i+2].
 *
 * Fix: the inner loop variable was named 'k', shadowing the function
 * parameter 'k' that is passed to OtherFunc — legal but error-prone; it is
 * renamed to 'j'.  The 'parallel for' over a 3-iteration loop is kept
 * as-is (this file is ROSE-generated test output), though the overhead
 * almost certainly outweighs the work.
 */
void foo(int istart,int iend,real8 *a,real8 *b,real8 *c,int k,real8 *l,real8 *m,real8 *n,real8 *o,real8 *p)
{
  for (int i = istart; i <= iend - 1; i += 1) {
    real8 s[3];
    real8 afi = a[i];
    real8 bfi = b[i];
    OtherFunc(k,l,m,n,o,p,afi,bfi,s);
    #pragma omp parallel for
    for (int j = 0; j <= 2; j += 1) {
      c[3 * i + j] = s[j];
    }
  }
}
|
omp-single-1.c | extern void abort (void);
/* Checks '#pragma omp single': exactly one thread of the parallel team
   must execute the single block, so the shared counter ends at 1. */
int
main()
{
  int counter = 0;

#pragma omp parallel shared (counter)
  {
#pragma omp single
    counter++;
  }

  if (counter != 1)
    abort ();
  return 0;
}
|
omp_get_num_threads.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
// Returns 1 (pass) when omp_get_num_threads() reported inside the parallel
// region equals the number of threads counted one-by-one in the critical
// section, 0 otherwise.
int test_omp_get_num_threads()
{
/* checks that omp_get_num_threads is equal to the number of
threads */
int nthreads_lib;
int nthreads = 0;
nthreads_lib = -1;
#pragma omp parallel
{
/* every thread increments the counter exactly once; 'critical'
serializes the non-atomic read-modify-write */
#pragma omp critical
{
nthreads++;
} /* end of critical */
/* only one thread queries the library value */
#pragma omp single
{
nthreads_lib = omp_get_num_threads ();
} /* end of single */
} /* end of parallel */
return (nthreads == nthreads_lib);
}
/* Runs the check REPETITIONS times; the process exit status is the number
   of failed repetitions (0 == all passed). */
int main()
{
  int failures = 0;
  for (int rep = 0; rep < REPETITIONS; rep++) {
    failures += test_omp_get_num_threads() ? 0 : 1;
  }
  return failures;
}
|
plot.h | #ifndef OPENMC_PLOT_H
#define OPENMC_PLOT_H
#include <unordered_map>
#include <sstream>
#include "pugixml.hpp"
#include "xtensor/xarray.hpp"
#include "hdf5.h"
#include "openmc/position.h"
#include "openmc/constants.h"
#include "openmc/cell.h"
#include "openmc/error.h"
#include "openmc/geometry.h"
#include "openmc/particle.h"
#include "openmc/xml_interface.h"
#include "openmc/random_lcg.h"
namespace openmc {
//===============================================================================
// Global variables
//===============================================================================
class Plot;
namespace model {
extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index
extern std::vector<Plot> plots; //!< Plot instance container
extern uint64_t plotter_prn_seeds[N_STREAMS]; // Random number seeds used for plotter
extern int plotter_stream; // Stream index used by the plotter
} // namespace model
//===============================================================================
// RGBColor holds color information for plotted objects
//===============================================================================
struct RGBColor {
  //Constructors
  RGBColor() : red(0), green(0), blue(0) { };
  RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) { };
  RGBColor(int r, int g, int b) : red(r), green(g), blue(b) { };
  //! Construct from a vector; must hold exactly three components
  //! \throws std::out_of_range if v.size() != 3
  RGBColor(const std::vector<int> &v) {
    if (v.size() != 3) {
      throw std::out_of_range("Incorrect vector size for RGBColor.");
    }
    red = v[0];
    green = v[1];
    blue = v[2];
  }
  // const-qualified so const RGBColor instances (e.g. WHITE, RED below)
  // can appear on the left-hand side of a comparison
  bool operator ==(const RGBColor& other) const {
    return red == other.red && green == other.green && blue == other.blue;
  }
  // Members
  uint8_t red, green, blue;
};
// some default colors
const RGBColor WHITE {255, 255, 255};
const RGBColor RED {255, 0, 0};
typedef xt::xtensor<RGBColor, 2> ImageData;
//! Raster of id values filled in by PlotBase::get_map()
struct IdData {
  // Constructor
  IdData(size_t h_res, size_t v_res);
  // Methods
  //! Store the ids for the cell found by particle p at pixel (y, x),
  //! at the given universe level
  void set_value(size_t y, size_t x, const Particle& p, int level);
  //! Flag pixel (y, x) as lying in overlapping cells
  void set_overlap(size_t y, size_t x);
  // Members
  xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids
};
//! Raster of property values filled in by PlotBase::get_map()
struct PropertyData {
  // Constructor
  PropertyData(size_t h_res, size_t v_res);
  // Methods
  //! Store the properties for the cell found by particle p at pixel (y, x)
  void set_value(size_t y, size_t x, const Particle& p, int level);
  //! Flag pixel (y, x) as lying in overlapping cells
  void set_overlap(size_t y, size_t x);
  // Members
  xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data
};
//! Kind of output a plot produces (2D image slice or 3D voxel file)
enum class PlotType {
  slice = 1,
  voxel = 2
};
//! Pair of axes spanning the 2D plot plane
enum class PlotBasis {
  xy = 1,
  xz = 2,
  yz = 3
};
//! Whether pixels are colored by cell id or by material id
enum class PlotColorBy {
  cells = 0,
  mats = 1
};
//===============================================================================
// Plot class
//===============================================================================
//! Geometric description of a plot view, plus the templated rasterizer
//! (get_map) that samples the geometry pixel by pixel.
class PlotBase {
public:
  //! Rasterize the geometry into a map of type T (IdData or PropertyData)
  template<class T> T get_map() const;
  // Members
public:
  Position origin_; //!< Plot origin in geometry
  Position width_; //!< Plot width in geometry
  PlotBasis basis_; //!< Plot basis (XY/XZ/YZ)
  std::array<size_t, 3> pixels_; //!< Plot size in pixels
  bool color_overlaps_; //!< Show overlapping cells?
  int level_; //!< Plot universe level
};
template<class T>
T PlotBase::get_map() const {
  // Sample the geometry on a width x height pixel grid: a particle is placed
  // at each pixel center, the cell there is located, and the result is
  // recorded into a map of type T (IdData or PropertyData).
  size_t width = pixels_[0];
  size_t height = pixels_[1];
  // get pixel size
  double in_pixel = (width_[0])/static_cast<double>(width);
  double out_pixel = (width_[1])/static_cast<double>(height);
  // size data array
  T data(width, height);
  // setup basis indices and initial position centered on pixel;
  // in_i/out_i pick which Position components vary across / down the image
  int in_i, out_i;
  Position xyz = origin_;
  switch(basis_) {
  case PlotBasis::xy :
    in_i = 0;
    out_i = 1;
    break;
  case PlotBasis::xz :
    in_i = 0;
    out_i = 2;
    break;
  case PlotBasis::yz :
    in_i = 1;
    out_i = 2;
    break;
  default:
    UNREACHABLE();
  }
  // set initial position (top-left pixel center)
  xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.;
  xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.;
  // arbitrary direction
  Direction dir = {0.7071, 0.7071, 0.0};
  #pragma omp parallel
  {
    Particle p;
    p.r() = xyz;
    p.u() = dir;
    p.coord_[0].universe = model::root_universe;
    int level = level_;
    int j{};
    #pragma omp for
    for (int y = 0; y < height; y++) {
      p.r()[out_i] = xyz[out_i] - out_pixel * y;  // step down one row
      for (int x = 0; x < width; x++) {
        p.r()[in_i] = xyz[in_i] + in_pixel * x;   // step across one column
        p.n_coord_ = 1;
        // local variables
        bool found_cell = exhaustive_find_cell(p);
        j = p.n_coord_ - 1;
        // a user-requested universe level overrides the deepest level found
        if (level >= 0) { j = level; }
        if (found_cell) {
          data.set_value(y, x, p, j);
        }
        if (color_overlaps_ && check_cell_overlap(p, false)) {
          data.set_overlap(y, x);
        }
      } // inner for
    } // outer for
  } // omp parallel
  return data;
}
//! A single user-defined plot, parsed from one XML plot node
class Plot : public PlotBase {
public:
  // Constructor
  Plot(pugi::xml_node plot);
  // Methods
private:
  // Each setter parses one attribute/child element of the XML plot node
  void set_id(pugi::xml_node plot_node);
  void set_type(pugi::xml_node plot_node);
  void set_output_path(pugi::xml_node plot_node);
  void set_bg_color(pugi::xml_node plot_node);
  void set_basis(pugi::xml_node plot_node);
  void set_origin(pugi::xml_node plot_node);
  void set_width(pugi::xml_node plot_node);
  void set_universe(pugi::xml_node plot_node);
  void set_default_colors(pugi::xml_node plot_node);
  void set_user_colors(pugi::xml_node plot_node);
  void set_meshlines(pugi::xml_node plot_node);
  void set_mask(pugi::xml_node plot_node);
  void set_overlap_color(pugi::xml_node plot_node);
  // Members
public:
  int id_; //!< Plot ID
  PlotType type_; //!< Plot type (Slice/Voxel)
  PlotColorBy color_by_; //!< Plot coloring (cell/material)
  int meshlines_width_; //!< Width of lines added to the plot
  int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot
  RGBColor meshlines_color_; //!< Color of meshlines on the plot
  RGBColor not_found_ {WHITE}; //!< Plot background color
  RGBColor overlap_color_ {RED}; //!< Plot overlap color
  std::vector<RGBColor> colors_; //!< Plot colors
  std::string path_plot_; //!< Plot output filename
};
//===============================================================================
// Non-member functions
//===============================================================================
//! Add mesh lines to image data of a plot object
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void draw_mesh_lines(Plot const& pl, ImageData& data);
//! Write a ppm image to file using a plot object's image data
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void output_ppm(Plot const& pl, const ImageData& data);
//! Initialize a voxel file
//! \param[in] id of an open hdf5 file
//! \param[in] dimensions of the voxel file (dx, dy, dz)
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace,
hid_t* dset, hid_t* memspace);
//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset,
hid_t memspace, void* buf);
//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);
//===============================================================================
// External functions
//===============================================================================
//! Read plot specifications from a plots.xml file
void read_plots_xml();
//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot const& pl);
//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot const& pl);
//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();
} // namespace openmc
#endif // OPENMC_PLOT_H
|
GB_binop__ge_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_uint32
// A.*B function (eWiseMult): GB_AemultB__ge_uint32
// A*D function (colscale): GB_AxD__ge_uint32
// D*A function (rowscale): GB_DxB__ge_uint32
// C+=B function (dense accum): GB_Cdense_accumB__ge_uint32
// C+=b function (dense accum): GB_Cdense_accumb__ge_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_uint32
// C=scalar+B GB_bind1st__ge_uint32
// C=scalar+B' GB_bind1st_tran__ge_uint32
// C=A+scalar GB_bind2nd__ge_uint32
// C=A'+scalar GB_bind2nd_tran__ge_uint32
// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT32 || GxB_NO_GE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__ge_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B where all three matrices are dense; the actual loop lives in
    // the included template, specialized by the GB_* macros defined above.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ge_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    // C += B, sparse into dense.  The template is compiled out (#if 0) for
    // this operator, so the function is a no-op that reports success.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ge_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b, scalar into dense.  The template is compiled out (#if 0) for
    // this operator, so the function is a no-op that reports success.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ge_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    // C = A*D: scale the columns of A by the diagonal matrix D, applying
    // cij = (aij >= djj) via the included colscale meta template.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ge_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    // C = D*B: scale the rows of B by the diagonal matrix D, applying
    // cij = (dii >= bij) via the included rowscale meta template.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__ge_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    // eWiseAdd: C = A+B or C<M> = A+B, with cij = (aij >= bij) where the
    // patterns overlap; the loop lives in the included add template.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ge_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    // eWiseMult: C = A.*B or C<M> = A.*B, with cij = (aij >= bij) on the
    // pattern intersection; the loop lives in the included emult template.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ge_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arrays and the bound scalar
    bool *out = (bool *) Cx_output ;
    uint32_t scalar = (*((uint32_t *) x_input)) ;
    uint32_t *in = (uint32_t *) Bx_input ;
    // out [p] = (scalar >= in [p]) for every entry p
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        out [p] = (scalar >= in [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ge_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arrays and the bound scalar
    bool *out = (bool *) Cx_output ;
    uint32_t *in = (uint32_t *) Ax_input ;
    uint32_t scalar = (*((uint32_t *) y_input)) ;
    // out [p] = (in [p] >= scalar) for every entry p
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        out [p] = (in [p] >= scalar) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB_bind1st_tran__ge_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // C = op (x, A'): transpose A and apply cij = (x >= aij), done via the
    // GB_CAST_OP macro defined just above and the transpose template.
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB_bind2nd_tran__ge_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // C = op (A', y): transpose A and apply cij = (aij >= y), done via the
    // GB_CAST_OP macro defined just above and the transpose template.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
shared-clause.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Demonstrates the OpenMP shared clause: a[] is updated in parallel, then
 * printed serially.  Fixes: implicit-int `main()` is invalid since C99, and
 * with default(none) the loop bound `n` must appear in a data-sharing
 * clause or the pragma fails to compile. */
int main(void)
{
  int i, n = 7;
  int a[n];
  for (i=0; i<n; i++) a[i] = i+1;
  #pragma omp parallel for shared(a) firstprivate(n) default(none)
  for (i=0; i<n; i++) a[i] += i;
  printf("Después de parallel for:\n");
  for (i=0; i<n; i++)
    printf("a[%d] = %d\n",i,a[i]);
  return 0;
}
|
kmeans_clustering.c | /*****************************************************************************/
/*IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. */
/*By downloading, copying, installing or using the software you agree */
/*to this license. If you do not agree to this license, do not download, */
/*install, copy or use the software. */
/* */
/* */
/*Copyright (c) 2005 Northwestern University */
/*All rights reserved. */
/*Redistribution of the software in source and binary forms, */
/*with or without modification, is permitted provided that the */
/*following conditions are met: */
/* */
/*1 Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* */
/*2 Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in the */
/* documentation and/or other materials provided with the distribution.*/
/* */
/*3 Neither the name of Northwestern University nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* */
/*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS */
/*IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */
/*TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND */
/*FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL */
/*NORTHWESTERN UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */
/*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/*(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR */
/*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */
/*HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, */
/*STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/*ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/*POSSIBILITY OF SUCH DAMAGE. */
/******************************************************************************/
/*************************************************************************/
/** File: kmeans_clustering.c **/
/** Description: Implementation of regular k-means clustering **/
/** algorithm **/
/** Author: Wei-keng Liao **/
/** ECE Department, Northwestern University **/
/** email: wkliao@ece.northwestern.edu **/
/** **/
/** Edited by: Jay Pisharath **/
/** Northwestern University. **/
/** **/
/** ================================================================ **/
/** **/
/** Edited by: Sang-Ha Lee **/
/** University of Virginia **/
/** **/
/** Description: No longer supports fuzzy c-means clustering; **/
/** only regular k-means clustering. **/
/** Simplified for main functionality: regular k-means **/
/** clustering. **/
/** **/
/*************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include "kmeans.h"
#include <omp.h>
#define RANDOM_MAX 2147483647
#ifndef FLT_MAX
#define FLT_MAX 3.40282347e+38
#endif
extern double wtime(void);
extern int num_omp_threads;
/* Return the index of the point in pts[] with the smallest squared
 * Euclidean distance to pt.  Returns -1 when npts <= 0; previously the
 * index was returned uninitialized in that case (undefined behavior). */
int find_nearest_point(float *pt, /* [nfeatures] */
                       int nfeatures,
                       float **pts, /* [npts][nfeatures] */
                       int npts)
{
    int index = -1;
    int i;
    float min_dist=FLT_MAX;

    /* find the cluster center id with min distance to pt */
    for (i=0; i<npts; i++) {
        float dist;
        dist = euclid_dist_2(pt, pts[i], nfeatures);  /* no need square root */
        if (dist < min_dist) {
            min_dist = dist;
            index = i;
        }
    }
    return(index);
}
/*----< euclid_dist_2() >----------------------------------------------------*/
/* multi-dimensional spatial Euclid distance square */
__inline
float euclid_dist_2(float *pt1,
                    float *pt2,
                    int numdims)
{
    /* Sum of squared per-coordinate differences; no square root taken. */
    float sum = 0.0;
    int d;
    for (d = 0; d < numdims; d++) {
        float diff = pt1[d] - pt2[d];
        sum += diff * diff;
    }
    return (sum);
}
/*----< kmeans_clustering() >---------------------------------------------*/
/* Cluster npoints feature vectors into nclusters groups with Lloyd's
 * algorithm, parallelized over points with OpenMP (per-thread partial
 * sums, reduced serially).  Returns a newly allocated
 * [nclusters][nfeatures] array of centers (caller frees clusters[0] then
 * clusters); membership[] receives each point's cluster id.  Iterates
 * until fewer than `threshold` points change cluster, or 500 loops. */
float** kmeans_clustering(float **feature, /* in: [npoints][nfeatures] */
                          int nfeatures,
                          int npoints,
                          int nclusters,
                          float threshold,
                          int *membership) /* out: [npoints] */
{
    int i, j, k, n=0, index, loop=0;
    int *new_centers_len;           /* [nclusters]: no. of points in each cluster */
    float **new_centers;            /* [nclusters][nfeatures] */
    float **clusters;               /* out: [nclusters][nfeatures] */
    float delta;
    int nthreads;
    int **partial_new_centers_len;  /* per-thread point counts */
    float ***partial_new_centers;   /* per-thread center sums */

    nthreads = num_omp_threads;

    /* allocate space for returning variable clusters[] (one contiguous slab) */
    clusters = (float**) malloc(nclusters * sizeof(float*));
    clusters[0] = (float*) malloc(nclusters * nfeatures * sizeof(float));
    for (i=1; i<nclusters; i++)
        clusters[i] = clusters[i-1] + nfeatures;

    /* use the first nclusters points as the initial cluster centers */
    for (i=0; i<nclusters; i++) {
        //n = (int)rand() % npoints;
        for (j=0; j<nfeatures; j++)
            clusters[i][j] = feature[n][j];
        n++;
    }

    for (i=0; i<npoints; i++)
        membership[i] = -1;

    /* need to initialize new_centers_len and new_centers[0] to all 0 */
    new_centers_len = (int*) calloc(nclusters, sizeof(int));
    new_centers = (float**) malloc(nclusters * sizeof(float*));
    new_centers[0] = (float*) calloc(nclusters * nfeatures, sizeof(float));
    for (i=1; i<nclusters; i++)
        new_centers[i] = new_centers[i-1] + nfeatures;

    /* per-thread scratch space so threads accumulate without locking */
    partial_new_centers_len = (int**) malloc(nthreads * sizeof(int*));
    partial_new_centers_len[0] = (int*) calloc(nthreads*nclusters, sizeof(int));
    for (i=1; i<nthreads; i++)
        partial_new_centers_len[i] = partial_new_centers_len[i-1]+nclusters;

    partial_new_centers =(float***)malloc(nthreads * sizeof(float**));
    partial_new_centers[0] =(float**) malloc(nthreads*nclusters * sizeof(float*));
    for (i=1; i<nthreads; i++)
        partial_new_centers[i] = partial_new_centers[i-1] + nclusters;

    for (i=0; i<nthreads; i++)
    {
        for (j=0; j<nclusters; j++)
            partial_new_centers[i][j] = (float*)calloc(nfeatures, sizeof(float));
    }
    printf("num of threads = %d\n", num_omp_threads);

    do {
        delta = 0.0;
        omp_set_num_threads(num_omp_threads);
        #pragma omp parallel \
                shared(feature,clusters,membership,partial_new_centers,partial_new_centers_len)
        {
            int tid = omp_get_thread_num();
            #pragma omp for \
                        private(i,j,index) \
                        firstprivate(npoints,nclusters,nfeatures) \
                        schedule(static) \
                        reduction(+:delta)
            for (i=0; i<npoints; i++) {
                /* find the index of the nearest cluster center */
                index = find_nearest_point(feature[i],
                                           nfeatures,
                                           clusters,
                                           nclusters);
                /* if membership changes, increase delta by 1 */
                if (membership[i] != index) delta += 1.0;

                /* assign the membership to object i */
                membership[i] = index;

                /* update new cluster centers : sum of all objects located
                   within */
                partial_new_centers_len[tid][index]++;
                for (j=0; j<nfeatures; j++)
                    partial_new_centers[tid][index][j] += feature[i][j];
            }
        } /* end of #pragma omp parallel */

        /* let the main thread perform the array reduction */
        for (i=0; i<nclusters; i++) {
            for (j=0; j<nthreads; j++) {
                new_centers_len[i] += partial_new_centers_len[j][i];
                partial_new_centers_len[j][i] = 0;  /* int slot: 0, not 0.0 */
                for (k=0; k<nfeatures; k++) {
                    new_centers[i][k] += partial_new_centers[j][i][k];
                    partial_new_centers[j][i][k] = 0.0;
                }
            }
        }

        /* replace old cluster centers with new_centers */
        for (i=0; i<nclusters; i++) {
            for (j=0; j<nfeatures; j++) {
                if (new_centers_len[i] > 0)
                    clusters[i][j] = new_centers[i][j] / new_centers_len[i];
                new_centers[i][j] = 0.0;  /* set back to 0 */
            }
            new_centers_len[i] = 0;  /* set back to 0 */
        }
    } while (delta > threshold && loop++ < 500);

    /* free the per-thread scratch space (previously leaked) */
    for (i=0; i<nthreads; i++)
        for (j=0; j<nclusters; j++)
            free(partial_new_centers[i][j]);
    free(partial_new_centers[0]);
    free(partial_new_centers);
    free(partial_new_centers_len[0]);
    free(partial_new_centers_len);

    free(new_centers[0]);
    free(new_centers);
    free(new_centers_len);

    return clusters;
}
|
wpapmk.h | /*
* This software is Copyright (c) 2012 Lukas Odzioba <lukas dot odzioba at gmail dot com>
* and Copyright (c) 2012-2017 magnum
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* hccap format was introduced by oclHashcat-plus (now renamed to hashcat),
* and it is described here: http://hashcat.net/wiki/hccap
* Code is based on Aircrack-ng source
*/
#ifndef _WPAPMK_H
#define _WPAPMK_H
#include <stdint.h>
#include <assert.h>
#if HAVE_OPENSSL_CMAC_H
#include <openssl/cmac.h>
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "hmacmd5.h"
#include "hmac_sha.h"
#include "sha2.h"
#include "base64_convert.h"
#include "hccap.h"
#define BINARY_SIZE sizeof(mic_t)
#define BINARY_ALIGN 4
#define PLAINTEXT_MIN_LEN 64
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE (sizeof(hccap_t) - sizeof(mic_t))
#define SALT_ALIGN MEM_ALIGN_NONE
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define FORMAT_TAG "$WPAPSK$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
/* Binary value: the 16-byte EAPOL MIC compared against the computed one */
typedef struct
{
	unsigned char keymic[16];
} mic_t;

/* Length-prefixed candidate key, NUL-terminated */
typedef struct {
	uint32_t length;
	uint8_t v[PLAINTEXT_LENGTH + 1];
} wpapsk_password;

/* 256-bit PMK as eight 32-bit words (see outbuffer below) */
typedef struct {
	uint32_t v[8];
} wpapsk_hash;

/* Salt: the essid plus, for the OpenCL build, pre-processed eapol data */
typedef struct {
	uint32_t length;
#ifdef JOHN_OCL_WPAPMK
	uint8_t eapol[256 + 64];
	uint32_t eapol_size;
	uint8_t data[64 + 12]; // MACs + nonces, see set_salt()
#endif
	uint8_t salt[36]; // essid
} wpapsk_salt;
static struct fmt_tests tests[] = {
{"$WPAPSK$test#..qHuv0A..ZPYJBRzZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsRIfQN2Zar6EXp2BYcRuSkWEJIWjEJJvb4DWZCspbZ51.21.3zy.EY.6........../zZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsQ..................................................................BoK.31m.E2..31m.U2..31m.U2..31m.U................................................................................................................................................................................/X.....E...AkkDQmDg9837LBHG.dGlKA", "cdd79a5acfb070c7e9d1023b870285d639e430b32f31aa37ac825a55b55524ee"},
{"$WPAPSK$Coherer#..l/Uf7J..qHUXMunTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosMyXdNxfBZUAYmgKqeb6GBPxLiIZr56NtWTGR/Cp5ldAk61.5I0.Ec.2...........nTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosM.................................................................3X.I.E..1uk0.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...8FWdk8OpPckhewBwt4MXYI", "a288fcf0caaacda9a9f58633ff35e8992a01d9c10ba5e02efdf8cb5d730ce7bc"},
#if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPMK)
{"$WPAPSK$Neheb#g9a8Jcre9D0WrPnEN4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw04ASqHgvo12wJYJywulb6pWM6C5uqiMPNKNe9pkr6LE61.5I0.Eg.2..........1N4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4X...................................................................................................................................................................................../t.....k...0sHl.mVkiHW.ryNchcMd4g", "fb57668cd338374412c26208d79aa5c30ce40a110224f3cfb592a8f2e8bf53e8"},
#endif
{NULL}
};
/** Below are common variables used by wpapmk_fmt.c and opencl_wpapmk_fmt.c **/
static hccap_t hccap; ///structure with hccap data
static wpapsk_salt currentsalt; ///structure for essid
static mic_t *mic; ///table for MIC keys
#ifndef JOHN_OCL_WPAPMK
static wpapsk_hash *outbuffer; ///table for PMK calculated by GPU
#endif
/** Below are common functions used by wpapmk_fmt.c and opencl_wpapmk_fmt.c **/
static hccap_t *decode_hccap(char *ciphertext)
{
	/* Parse "$WPAPSK$<essid>#<encoded>" into a static hccap_t.  The text
	   after '#' is 475 chars in John's atoi64 alphabet, decoded 4 chars ->
	   3 bytes into everything past the 36-byte essid field. */
	static hccap_t hccap;
	char *essid = ciphertext + FORMAT_TAG_LEN;
	char *hash = strrchr(ciphertext, '#');
	char *d = hccap.essid;
	/* NOTE(review): computed before the hash == NULL check below;
	   NULL + 1 is technically undefined — consider moving it after. */
	char *cap = hash + 1;
	unsigned char tbuf[sizeof(hccap_t)];
	unsigned char *dst = tbuf;
	int i;
	memset(&hccap, 0, sizeof(hccap));
	if (hash == NULL)
		return &hccap;
	while (essid != hash) { /* copy essid to hccap */
		*d++ = *essid++;
	}
	*d = '\0';
	assert(*essid == '#');
	/* decode 118 full 4-char groups, 3 bytes each... */
	for (i = 0; i < 118; i++) {
		dst[0] =
		    (atoi64[ARCH_INDEX(cap[0])] << 2) |
		    (atoi64[ARCH_INDEX(cap[1])] >> 4);
		dst[1] =
		    (atoi64[ARCH_INDEX(cap[1])] << 4) |
		    (atoi64[ARCH_INDEX(cap[2])] >> 2);
		dst[2] =
		    (atoi64[ARCH_INDEX(cap[2])] << 6) |
		    (atoi64[ARCH_INDEX(cap[3])]);
		dst += 3;
		cap += 4;
	}
	/* ...then the final partial group: 2 bytes from 3 chars */
	dst[0] =
	    (atoi64[ARCH_INDEX(cap[0])] << 2) |
	    (atoi64[ARCH_INDEX(cap[1])] >> 4);
	dst[1] =
	    (atoi64[ARCH_INDEX(cap[1])] << 4) |
	    (atoi64[ARCH_INDEX(cap[2])] >> 2);
	/* This emits warnings on some compilers */
	//memcpy(&hccap.mac1,tbuf,sizeof(hccap_t)-36);
	memcpy(((char*)&hccap) + 36, tbuf, sizeof(hccap_t) - 36);
#if !ARCH_LITTLE_ENDIAN
	hccap.eapol_size = JOHNSWAP(hccap.eapol_size);
	hccap.keyver = JOHNSWAP(hccap.keyver);
#endif
	return &hccap;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
uint32_t dummy;
} binary;
hccap_t *hccap = decode_hccap(ciphertext);
memcpy(binary.c, hccap->keymic, BINARY_SIZE);
return binary.c;
}
static void *get_salt(char *ciphertext)
{
	/* Return a static hccap_t holding everything except the MIC. */
	static hccap_t s;
	hccap_t *decoded = decode_hccap(ciphertext);

	memcpy(&s, decoded, SALT_SIZE);
	return &s;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
	/* Sanity-check a candidate ciphertext: format tag, essid length,
	   exactly 475 valid atoi64 chars after '#', and plausible decoded
	   hccap fields.  Returns 1 if valid, 0 otherwise. */
	char *hash;
	int hashlength = 0;
	hccap_t *hccap;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	hash = strrchr(ciphertext, '#');
	if (hash == NULL || hash - (ciphertext + FORMAT_TAG_LEN) > 32)
		return 0;
	hash++;
	while (hash < ciphertext + strlen(ciphertext)) {
		if (atoi64[ARCH_INDEX(*hash++)] == 0x7f)
			return 0;
		hashlength++;
	}
	if (hashlength != 475)
		return 0;
	hccap = decode_hccap(ciphertext);
	if (strlen(hccap->essid) > 32) /* real life limit */
		return 0;
	if (hccap->eapol_size > 256)
		return 0;
	if (hccap->keyver < 1)
		return 0;
	/* keyver 3 is only supported with OpenSSL CMAC or the OpenCL build */
#if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPMK)
	if (hccap->keyver > 3)
		return 0;
#else
	if (hccap->keyver > 2)
		return 0;
#endif
	return 1;
}
#ifndef JOHN_OCL_WPAPMK
static MAYBE_INLINE void prf_512(uint32_t * key, uint8_t * data, uint32_t * ret)
{
	/* 802.11i-style PRF: HMAC-SHA1(key, "Pairwise key expansion" \0 data
	   \0) producing 20 output bytes (only the first iteration is needed
	   here; the final \0 appears to be the iteration counter i = 0 —
	   verify against the IEEE 802.11i PRF definition). */
	char *text = (char*)"Pairwise key expansion";
	unsigned char buff[100];
	memcpy(buff, text, 22);
	memcpy(buff + 23, data, 76);
	buff[22] = 0;
	buff[76 + 23] = 0;
	hmac_sha1((unsigned char*)key, 32, buff, 100, (unsigned char*)ret, 20);
}
#endif
static void insert_mac(uint8_t * data)
{
	/* Write the two MAC addresses in ascending (memcmp) order. */
	const unsigned char *first = hccap.mac1;
	const unsigned char *second = hccap.mac2;

	if (memcmp(hccap.mac1, hccap.mac2, 6) > 0) {
		first = hccap.mac2;
		second = hccap.mac1;
	}
	memcpy(data, first, 6);
	memcpy(data + 6, second, 6);
}
static void insert_nonce(uint8_t * data)
{
	/* Canonical order: the numerically smaller nonce goes first. */
	const uint8_t *first = hccap.nonce1, *second = hccap.nonce2;

	if (memcmp(hccap.nonce1, hccap.nonce2, 32) > 0) {
		first = hccap.nonce2;
		second = hccap.nonce1;
	}
	memcpy(data, first, 32);
	memcpy(data + 32, second, 32);
}
/*
 * Install a new salt (an hccap_t) as the current one; on the OpenCL
 * build also prepare the pre-padded eapol buffer and upload the salt
 * to the device.
 */
static void set_salt(void *salt)
{
	memcpy(&hccap, salt, SALT_SIZE);
	/* NOTE(review): strncpy does not NUL-terminate when essid fills the
	 * buffer; currentsalt.length carries the real length — confirm no
	 * consumer relies on termination. */
	strncpy((char*)currentsalt.salt, hccap.essid, sizeof(currentsalt.salt));
	currentsalt.length = strlen(hccap.essid);
#ifdef JOHN_OCL_WPAPMK
	currentsalt.eapol_size = hccap.eapol_size;
	memcpy(currentsalt.eapol, hccap.eapol, hccap.eapol_size);
	/* MD5/SHA-1 style padding: 0x80 marker, then zeros. */
	memset(currentsalt.eapol + hccap.eapol_size, 0x80, 1);
	memset(currentsalt.eapol + hccap.eapol_size + 1, 0, 256 + 64 - hccap.eapol_size - 1);
	if (hccap.keyver == 2)
		alter_endianity(currentsalt.eapol, 256+56);
	/* Store the message bit-length in the last word of the padded block. */
	((unsigned int*)currentsalt.eapol)[16 * ((hccap.eapol_size + 8) / 64) + ((hccap.keyver == 1) ? 14 : 15)] = (64 + hccap.eapol_size) << 3;
	insert_mac(currentsalt.data);
	insert_nonce(currentsalt.data + 12);
	if (hccap.keyver < 3)
		alter_endianity(currentsalt.data, 64 + 12);
	/* BUGFIX: "&currentsalt" had been mangled to "¤tsalt" ("&curren"
	 * swallowed as an HTML entity during a conversion); restored. */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(wpapsk_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu");
#endif
}
#ifndef JOHN_OCL_WPAPMK
#if HAVE_OPENSSL_CMAC_H
/* Code borrowed from https://w1.fi/wpa_supplicant/ starts */
#define SHA256_MAC_LEN 32
typedef uint16_t u16;
typedef uint8_t u8;
/* Store a 16-bit value into a[0..1] in little-endian byte order. */
static inline void WPA_PUT_LE16(u8 *a, u16 val)
{
	a[0] = (u8)(val & 0xff);
	a[1] = (u8)(val >> 8);
}
/* SHA256 over the concatenation of num_elem (addr, len) chunks into mac. */
static void sha256_vector(size_t num_elem, const u8 *addr[], const size_t *len, u8 *mac)
{
	SHA256_CTX ctx;
	size_t k;

	SHA256_Init(&ctx);
	for (k = 0; k != num_elem; k++)
		SHA256_Update(&ctx, addr[k], len[k]);
	SHA256_Final(mac, &ctx);
}
/*
 * HMAC-SHA256 over a vector of (addr, len) data chunks (RFC 2104):
 *   SHA256(K XOR opad || SHA256(K XOR ipad || text))
 * At most 5 data elements are supported (one _addr slot holds the pad).
 */
static void hmac_sha256_vector(const u8 *key, size_t key_len, size_t num_elem,
               const u8 *addr[], const size_t *len, u8 *mac)
{
	unsigned char k_pad[64]; /* padding - key XORd with ipad/opad */
	unsigned char tk[SHA256_MAC_LEN];
	const u8 *_addr[6];
	size_t _len[6], i;

	/* BUGFIX: bound num_elem — more than 5 elements would overflow
	 * _addr/_len (upstream wpa_supplicant rejects this case too). */
	if (num_elem > 5)
		return;
	/* BUGFIX (RFC 2104): keys longer than the 64-byte block size must
	 * be hashed first; previously such a key overflowed k_pad. */
	if (key_len > 64) {
		sha256_vector(1, &key, &key_len, tk);
		key = tk;
		key_len = SHA256_MAC_LEN;
	}

	/* the HMAC_SHA256 transform looks like:
	 *
	 * SHA256(K XOR opad, SHA256(K XOR ipad, text))
	 *
	 * where K is an n byte key
	 * ipad is the byte 0x36 repeated 64 times
	 * opad is the byte 0x5c repeated 64 times
	 * and text is the data being protected */

	/* start out by storing key in ipad */
	memset(k_pad, 0, sizeof(k_pad));
	memcpy(k_pad, key, key_len);
	/* XOR key with ipad values */
	for (i = 0; i < 64; i++)
		k_pad[i] ^= 0x36;

	/* perform inner SHA256 */
	_addr[0] = k_pad;
	_len[0] = 64;
	for (i = 0; i < num_elem; i++) {
		_addr[i + 1] = addr[i];
		_len[i + 1] = len[i];
	}
	sha256_vector(1 + num_elem, _addr, _len, mac);

	memset(k_pad, 0, sizeof(k_pad));
	memcpy(k_pad, key, key_len);
	/* XOR key with opad values */
	for (i = 0; i < 64; i++)
		k_pad[i] ^= 0x5c;

	/* perform outer SHA256 */
	_addr[0] = k_pad;
	_len[0] = 64;
	_addr[1] = mac;
	_len[1] = SHA256_MAC_LEN;
	sha256_vector(2, _addr, _len, mac);
}
/*
 * IEEE 802.11 SHA256-based KDF: expands key material to buf_len_bits
 * bits by iterating HMAC-SHA256 over (counter_le || label || data ||
 * length_le) with an incrementing little-endian counter.
 * Borrowed from wpa_supplicant (sha256_prf_bits).
 */
static void sha256_prf_bits(const u8 *key, size_t key_len, const char *label,
        const u8 *data, size_t data_len, u8 *buf, size_t buf_len_bits)
{
	u16 counter = 1;
	size_t pos, plen;
	u8 hash[SHA256_MAC_LEN];
	const u8 *addr[4];
	size_t len[4];
	u8 counter_le[2], length_le[2];
	size_t buf_len = (buf_len_bits + 7) / 8;  /* round up to whole bytes */

	/* Fixed input layout; only counter_le changes between iterations. */
	addr[0] = counter_le;
	len[0] = 2;
	addr[1] = (u8 *) label;
	len[1] = strlen(label);
	addr[2] = data;
	len[2] = data_len;
	addr[3] = length_le;
	len[3] = sizeof(length_le);

	WPA_PUT_LE16(length_le, buf_len_bits);
	pos = 0;
	while (pos < buf_len) {
		plen = buf_len - pos;
		WPA_PUT_LE16(counter_le, counter);
		if (plen >= SHA256_MAC_LEN) {
			/* Full 32-byte block fits; write directly into buf. */
			hmac_sha256_vector(key, key_len, 4, addr, len, &buf[pos]);
			pos += SHA256_MAC_LEN;
		} else {
			/* Final partial block: hash to a temp, copy the tail. */
			hmac_sha256_vector(key, key_len, 4, addr, len, hash);
			memcpy(&buf[pos], hash, plen);
			pos += plen;
			break;
		}
		counter++;
	}

	/*
	 * Mask out unused bits in the last octet if it does not use all the
	 * bits.
	 */
	if (buf_len_bits % 8) {
		u8 mask = 0xff << (8 - buf_len_bits % 8);
		buf[pos - 1] &= mask;
	}
}
#endif /* HAVE_OPENSSL_CMAC_H */
/* Code borrowed from https://w1.fi/wpa_supplicant/ ends */
/*
 * Turn each computed PMK (outbuffer[i].v) into a keymic for comparison,
 * per the salt's key version:
 *   keyver 1: PTK via PRF-512, MIC = HMAC-MD5 over the EAPOL frame
 *   keyver 2: PTK via PRF-512, MIC = HMAC-SHA1 (truncated to 16 bytes)
 *   keyver 3: PTK via SHA256 KDF, MIC = AES-128-CMAC (802.11w)
 * 'data' is the shared 76-byte blob of ordered MACs and nonces.
 */
static void wpapsk_postprocess(int keys)
{
	int i;
	uint8_t data[64 + 12];

	insert_mac(data);
	insert_nonce(data + 12);

	if (hccap.keyver == 1) {
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic)
#endif
		for (i = 0; i < keys; i++) {
			uint32_t prf[20/4];
			HMACMD5Context ctx;

			prf_512(outbuffer[i].v, data, prf); // PTK
			/* First 16 bytes of the PTK key the MIC computation. */
			hmac_md5_init_K16((unsigned char*)prf, &ctx);
			hmac_md5_update(hccap.eapol, hccap.eapol_size, &ctx);
			hmac_md5_final(mic[i].keymic, &ctx);
		}
	} else if (hccap.keyver == 2) {
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic)
#endif
		for (i = 0; i < keys; i++) {
			uint32_t prf[20/4];

			prf_512(outbuffer[i].v, data, prf); // PTK
			hmac_sha1((unsigned char*)prf, 16, hccap.eapol,
			          hccap.eapol_size, mic[i].keymic, 16);
		}
#if HAVE_OPENSSL_CMAC_H
	} else if (hccap.keyver == 3) { // 802.11w, WPA-PSK-SHA256
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic)
#endif
		for (i = 0; i < keys; i++) {
			unsigned char ptk[48];
			unsigned char cmic[16];
			size_t miclen;
			CMAC_CTX *ctx;

			sha256_prf_bits((unsigned char*)outbuffer[i].v, 32, "Pairwise key expansion", data, 76, ptk, 48 * 8); // PTK
			// Compute MIC
			ctx = CMAC_CTX_new();
			CMAC_Init(ctx, ptk, 16, EVP_aes_128_cbc(), 0);
			CMAC_Update(ctx, hccap.eapol, hccap.eapol_size);
			CMAC_Final(ctx, cmic, &miclen);
			memcpy(mic[i].keymic, cmic, 16);
			CMAC_CTX_free(ctx);
		}
#endif /* HAVE_OPENSSL_CMAC_H */
	}
}
#endif /* #ifndef JOHN_OCL_WPAPMK */
static int get_hash_0(int index)
{
	/* Low bits of the first 32-bit word of the computed MIC. */
	return ((uint32_t *) mic[index].keymic)[0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
	/* Low bits of the first 32-bit word of the computed MIC. */
	return ((uint32_t *) mic[index].keymic)[0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
	/* Low bits of the first 32-bit word of the computed MIC. */
	return ((uint32_t *) mic[index].keymic)[0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
	/* Low bits of the first 32-bit word of the computed MIC. */
	return ((uint32_t *) mic[index].keymic)[0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
	/* Low bits of the first 32-bit word of the computed MIC. */
	return ((uint32_t *) mic[index].keymic)[0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
	/* Low bits of the first 32-bit word of the computed MIC. */
	return ((uint32_t *) mic[index].keymic)[0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
	/* Low bits of the first 32-bit word of the computed MIC. */
	return ((uint32_t *) mic[index].keymic)[0] & PH_MASK_6;
}
/*
 * Quick reject across all candidates: compare only the first 32-bit
 * word of the binary against each computed MIC.
 */
static int cmp_all(void *binary, int count)
{
	uint32_t b = ((uint32_t *) binary)[0];
	/* FIX: loop index was uint32_t, making "i < count" a signed/unsigned
	 * comparison against the int parameter (-Wsign-compare). */
	int i;

	for (i = 0; i < count; i++) {
		uint32_t *m = (uint32_t*) mic[i].keymic;
		if (b == m[0])
			return 1;
	}
	return 0;
}
/* Word-wise comparison of the full BINARY_SIZE MIC for one candidate. */
static int cmp_one(void *binary, int index)
{
	const uint32_t *want = (const uint32_t *) binary;
	const uint32_t *have = (const uint32_t *) mic[index].keymic;
	int w;

	for (w = 0; w < BINARY_SIZE / 4; w++) {
		if (want[w] != have[w])
			return 0;
	}
	return 1;
}
static int cmp_exact(char *source, int index)
{
	/* cmp_one() already compares the full MIC; nothing left to verify. */
	(void)source;
	(void)index;
	return 1;
}
/* qsort-style salt ordering: primarily by the first 36 bytes (essid
 * field), then by the whole binary salt for a deterministic tiebreak. */
static int salt_compare(const void *x, const void *y)
{
	int by_essid = strncmp((const char*)x, (const char*)y, 36);

	return by_essid ? by_essid
	                : memcmp((const char*)x, (const char*)y, SALT_SIZE);
}
/*
 * Report the key version as the format's first tunable cost:
 * 1=WPA (MD5), 2=WPA2 (SHA1), 3=802.11w (SHA256)
 */
static unsigned int get_keyver(void *salt)
{
	const hccap_t *my_salt = salt;

	return (unsigned int) my_salt->keyver;
}
#endif
|
GB_unaryop__minv_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_uint32
// op(A') function: GB_tran__minv_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/*
 * Cx [p] = GB_IMINV_UNSIGNED ((uint64_t) Ax [p], 64) for all anz entries,
 * parallelized over nthreads.  Auto-generated: any real change belongs in
 * the generator template, not here.  Returns GrB_NO_VALUE when this
 * operator/type combination is compiled out via GB_DISABLE.
 */
GrB_Info GB_unop__minv_uint64_uint32
(
    uint64_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/*
 * C = minv (uint64_t) A': typecast, apply the unary op, and transpose in
 * one pass.  The actual loop body lives in GB_unaryop_transpose.c, which
 * consumes the GB_* macros defined above.  Auto-generated file.
 */
GrB_Info GB_tran__minv_uint64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ft_ao.c | /*
* Fourier transformed AO pair
* \int e^{-i Gv \cdot r} i(r) * j(r) dr^3
*
* eval_gz, b, gxyz, gs:
* - when eval_gz is GTO_Gv_uniform_orth
* > b (reciprocal vectors) is diagonal 3x3 matrix
* > Gv k-space grids = dot(b.T,gxyz)
* > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv])
* > gs[3]: The number of *positive* G-vectors along each direction.
* - when eval_gz is GTO_Gv_uniform_nonorth
* > b is 3x3 matrix = 2\pi * scipy.linalg.inv(cell.lattice_vectors).T
* > Gv k-space grids = dot(b.T,gxyz)
* > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv])
* > gs[3]: The number of *positive* G-vectors along each direction.
* - when eval_gz is GTO_Gv_general
* only Gv is needed
* - when eval_gz is GTO_Gv_nonuniform_orth
* > b is the basic G value for each cartesian component
* Gx = b[:gs[0]]
* Gy = b[gs[0]:gs[0]+gs[1]]
* Gz = b[gs[0]+gs[1]:]
* > gs[3]: Number of basic G values along each direction.
* > gxyz[3,nGv] are used to index the basic G value
* > Gv is not used
*/
/*
*
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "gto/ft_ao.h"
#define SQRTPI 1.7724538509055160272981674833411451
#define EXPCUTOFF 100
#define NCTRMAX 72
void CINTg1e_index_xyz(int *idx, const CINTEnvVars *envs);
double CINTsquare_dist(const double *r1, const double *r2);
double CINTcommon_fac_sp(int l);
int CINTinit_int1e_EnvVars(CINTEnvVars *envs, const int *ng, const int *shls,
const int *atm, const int natm,
const int *bas, const int nbas, const double *env);
/*
 * Initialize the libcint environment for a Fourier-transformed AO pair,
 * then set the strides/size of the intermediate buffer g used by the
 * recursion routines below.
 */
void GTO_ft_init1e_envs(CINTEnvVars *envs, const int *ng, const int *shls,
                        const int *atm, const int natm,
                        const int *bas, const int nbas, const double *env)
{
        CINTinit_int1e_EnvVars(envs, ng, shls, atm, natm, bas, nbas, env);

        int dli, dlj;
        /* Buffer extents: the recursion raises angular momentum on one
         * center up to li_ceil+lj_ceil, so that axis needs the full range
         * while the other only needs its own l plus one. */
        if (envs->li_ceil < envs->lj_ceil) {
                dli = envs->li_ceil + 1;
                dlj = envs->li_ceil + envs->lj_ceil + 1;
        } else {
                dli = envs->li_ceil + envs->lj_ceil + 1;
                dlj = envs->lj_ceil + 1;
        }
        envs->g_stride_i = 1;
        envs->g_stride_j = dli;
        envs->g_size = dli * dlj;
}
/* _LEN_CART[l] = (l+1)(l+2)/2: number of Cartesian components for
 * angular momentum l. */
static const int _LEN_CART[] = {
        1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105, 120, 136
};
/* _CUM_LEN_CART[l]: cumulative sum of _LEN_CART up to and including l;
 * used as the row offset into the _DOWN* tables below. */
static const int _CUM_LEN_CART[] = {
        1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364, 455, 560, 680, 816,
};
/*
* WHEREX_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if x > 0]
* WHEREY_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if y > 0]
* WHEREZ_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if z > 0]
*/
static const int _UPIDY[] = {
1,
3, 4,
6, 7, 8,
10, 11, 12, 13,
15, 16, 17, 18, 19,
21, 22, 23, 24, 25, 26,
28, 29, 30, 31, 32, 33, 34,
36, 37, 38, 39, 40, 41, 42, 43,
45, 46, 47, 48, 49, 50, 51, 52, 53,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,
105,106,107,108,109,110,111,112,113,114,115,116,117,118,
120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
};
static const int _UPIDZ[] = {
2,
4, 5,
7, 8, 9,
11, 12, 13, 14,
16, 17, 18, 19, 20,
22, 23, 24, 25, 26, 27,
29, 30, 31, 32, 33, 34, 35,
37, 38, 39, 40, 41, 42, 43, 44,
46, 47, 48, 49, 50, 51, 52, 53, 54,
56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,
};
/*
* _DOWN_XYZ, _DOWN_XYZ_ORDER, _DOWN1, _DOWN2 labels the index in the 1D
* recursive relation f_{i+1} = i/2a * f_{i-1} + X * f_{i}
* _DOWN_XYZ_ORDER i in i/2a
* _DOWN2 index of f_{i-1}
* _DOWN_XYZ index of X
* _DOWN1 index of f_{i}
*/
static const int _DOWN1[] = {
-1,
0, 0, 0,
0, 1, 2, 1, 2, 2,
0, 0, 0, 3, 4, 5, 3, 3, 5, 5,
0, 0, 0, 3, 2, 5, 6, 7, 8, 9, 6, 6, 8, 9, 9,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 11, 12, 13, 14, 10, 10, 12, 13, 14, 14,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 16, 17, 18, 19, 20, 15, 15, 17, 18, 19, 20, 20,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 22, 23, 24, 25, 26, 27, 21, 21, 23, 24, 25, 26, 27, 27,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 29, 30, 31, 32, 33, 34, 35, 28, 28, 30, 31, 32, 33, 34, 35, 35,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 36, 36, 38, 39, 40, 41, 42, 43, 44, 44,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 45, 45, 47, 48, 49, 50, 51, 52, 53, 54, 54,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 55, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 65,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 66, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 77,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 78, 78, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 90,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 91, 91, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 104,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 78, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 90, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 105, 105, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 119,
};
static const int _DOWN2[] = {
-1,
-1, -1, -1,
0, -1, -1, 0, -1, 0,
0, -1, -1, -1, -1, -1, 1, -1, -1, 2,
0, -1, -1, 3, -1, 5, -1, -1, -1, -1, 3, -1, 5, -1, 5,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, -1, -1, -1, -1, -1, 6, -1, 8, 9, -1, 9,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, -1, -1, -1, -1, -1, -1, 10, -1, 12, 13, 14, -1, 14,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, -1, -1, -1, -1, -1, -1, -1, 15, -1, 17, 18, 19, 20, -1, 20,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, 21, -1, 23, 24, 25, 26, 27, -1, 27,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, 30, 31, 32, 33, 34, 35, -1, 35,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, 38, 39, 40, 41, 42, 43, 44, -1, 44,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, 47, 48, 49, 50, 51, 52, 53, 54, -1, 54,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 65,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, -1, 77,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -1, 90,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, -1, 104, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, -1, 104,
};
static const int _DOWN_XYZ[] = {
2,
0, 1, 2,
0, 0, 0, 1, 1, 2,
0, 1, 2, 0, 0, 0, 1, 2, 1, 2,
0, 1, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
};
static const int _DOWN_XYZ_ORDER[] = {
0,
0, 0, 0,
1, 0, 0, 1, 0, 1,
2, 0, 0, 0, 0, 0, 2, 0, 0, 2,
3, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 3,
4, 0, 0, 2, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 4, 0, 2, 1, 0, 4,
5, 0, 0, 3, 0, 3, 2, 0, 0, 2, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 5, 0, 3, 2, 1, 0, 5,
6, 0, 0, 4, 0, 4, 3, 0, 0, 3, 2, 0, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 4, 3, 2, 1, 0, 6,
7, 0, 0, 5, 0, 5, 4, 0, 0, 4, 3, 0, 3, 0, 3, 2, 0, 2, 2, 0, 2, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 5, 4, 3, 2, 1, 0, 7,
8, 0, 0, 6, 0, 6, 5, 0, 0, 5, 4, 0, 4, 0, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 6, 5, 4, 3, 2, 1, 0, 8,
9, 0, 0, 7, 0, 7, 6, 0, 0, 6, 5, 0, 5, 0, 5, 4, 0, 4, 4, 0, 4, 3, 0, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 7, 6, 5, 4, 3, 2, 1, 0, 9,
10, 0, 0, 8, 0, 8, 7, 0, 0, 7, 6, 0, 6, 0, 6, 5, 0, 5, 5, 0, 5, 4, 0, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10,
11, 0, 0, 9, 0, 9, 8, 0, 0, 8, 7, 0, 7, 0, 7, 6, 0, 6, 6, 0, 6, 5, 0, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 11,
12, 0, 0, 10, 0, 10, 9, 0, 0, 9, 8, 0, 8, 0, 8, 7, 0, 7, 7, 0, 7, 6, 0, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 12,
13, 0, 0, 11, 0, 11, 10, 0, 0, 10, 9, 0, 9, 0, 9, 8, 0, 8, 8, 0, 8, 7, 0, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 13,
14, 0, 0, 12, 0, 12, 11, 0, 0, 11, 10, 0, 10, 0, 10, 9, 0, 9, 9, 0, 9, 8, 0, 8, 8, 8, 0, 8, 7, 0, 7, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 14,
};
#define WHEREX_IF_L_INC1(i) i
#define WHEREY_IF_L_INC1(i) _UPIDY[i]
#define WHEREZ_IF_L_INC1(i) _UPIDZ[i]
#define STARTX_IF_L_DEC1(i) 0
#define STARTY_IF_L_DEC1(i) ((i<2)?0:_LEN_CART[i-2])
#define STARTZ_IF_L_DEC1(i) (_LEN_CART[i-1]-1)
#define ADDR_IF_L_DEC1(l,m) _DOWN1[_CUM_LEN_CART[l-1]+m]
#define ADDR_IF_L_DEC2(l,m) _DOWN2[_CUM_LEN_CART[l-1]+m]
#define DEC1_XYZ(l,m) _DOWN_XYZ[_CUM_LEN_CART[l-1]+m]
#define DEC1_XYZ_ORDER(l,m) _DOWN_XYZ_ORDER[_CUM_LEN_CART[l-1]+m]
/*
 * Vertical recursion in 1D: starting from g[0..NGv) (the l=0 values per
 * G-vector), build all Cartesian components up to angular momentum topl
 * using f_{i+1} = i/(2*aij) * f_{i-1} + (rijri - i*k/(2*aij)) * f_{i},
 * driven by the _DOWN* index tables.  Results for level l are appended
 * contiguously in g, NGv values per component.  Returns the total number
 * of components written (sum of _LEN_CART[0..topl]).
 */
static int vrr1d(double complex *g, double *rijri, double aij,
                 double *Gv, int topl, size_t NGv)
{
        int cumxyz = 1;
        if (topl == 0) {
                return cumxyz;
        }

        /* Gv holds kx, ky, kz as three consecutive arrays of length NGv. */
        double *kx = Gv;
        double *ky = kx + NGv;
        double *kz = ky + NGv;
        int i, n, m, l;
        double a2;
        double complex *p0, *p1, *p2, *dec1, *dec2;
        /* NOTE(review): malloc result is not checked; OOM here would
         * dereference NULL below. */
        double *ka2 = malloc(sizeof(double) * NGv*3);
        double *kxa2 = ka2;
        double *kya2 = kxa2 + NGv;
        double *kza2 = kya2 + NGv;

        /* Precompute k/(2*aij) for each component and G-vector. */
        a2 = .5 / aij;
        for (n = 0; n < NGv; n++) {
                kxa2[n] = kx[n] * a2;
                kya2[n] = ky[n] * a2;
                kza2[n] = kz[n] * a2;
        }

        /* l = 1: three p-components directly from the l = 0 values. */
        p0 = g + NGv;
        for (n = 0; n < NGv; n++) {
                p0[      n] = (rijri[0] - kxa2[n]*_Complex_I) * g[n];
                p0[NGv  +n] = (rijri[1] - kya2[n]*_Complex_I) * g[n];
                p0[NGv*2+n] = (rijri[2] - kza2[n]*_Complex_I) * g[n];
        }
        cumxyz += 3;

        /* Higher l: each component of level l+1 is built from one
         * component of level l (p1) and, when the order allows, one of
         * level l-1 (p2), per the recursion tables. */
        for (l = 1; l < topl; l++) {
                p0 = g + cumxyz * NGv;
                dec1 = p0   - _LEN_CART[l  ] * NGv;
                dec2 = dec1 - _LEN_CART[l-1] * NGv;
                for (i = 0; i < _LEN_CART[l+1]; i++) {
                        m = DEC1_XYZ(l+1,i);             /* which axis to lower */
                        kxa2 = ka2 + m * NGv;            /* that axis' k/(2a) */
                        a2 = .5/aij * DEC1_XYZ_ORDER(l+1,i);
                        p1 = dec1 + ADDR_IF_L_DEC1(l+1,i) * NGv;
                        p2 = dec2 + ADDR_IF_L_DEC2(l+1,i) * NGv;
                        if (ADDR_IF_L_DEC2(l+1,i) < 0) {
                                /* no l-1 partner: two-term recursion only */
                                for (n = 0; n < NGv; n++) {
                                        p0[n] = (rijri[m]-kxa2[n]*_Complex_I)*p1[n];
                                }
                        } else {
                                for (n = 0; n < NGv; n++) {
                                        p0[n] = a2*p2[n] + (rijri[m]-kxa2[n]*_Complex_I)*p1[n];
                                }
                        }
                        p0 += NGv;
                }
                cumxyz += _LEN_CART[l+1];
        }
        free(ka2);
        return cumxyz;
}
/*
* if li = 3, lj = 1
* (10 + X*00 -> 01):
* gs + X*fs -> fp
*/
/*
 * One horizontal-recursion step raising the ket angular momentum by 1:
 * out(li,lj) = g10(li+1,lj-1) + rirj * g00(li,lj-1), applied per x/y/z
 * component.  g holds g00 followed by g10; each (i,j) pair spans NGv
 * complex values.  For lj == 0 the input is copied through unchanged.
 */
static void vrr2d_ket_inc1(double complex *out, const double complex *g,
                           double *rirj, int li, int lj, size_t NGv)
{
        if (lj == 0) {
                memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*NGv);
                return;
        }
        const int row_10 = _LEN_CART[li+1];
        const int row_00 = _LEN_CART[li  ];
        const int col_00 = _LEN_CART[lj-1];
        const double complex *g00 = g;
        const double complex *g10 = g + row_00*col_00*NGv;
        int i, j, n;
        const double complex *p00, *p10;
        double complex *p01 = out;

        /* Raise x on the ket for all lj-1 columns that allow it. */
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[0] * p00[n];
                        }
                        p01 += NGv;
                }
        }
        /* Raise y for the columns only reachable by a y-increment. */
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[1] * p00[n];
                        }
                        p01 += NGv;
                }
        }
        /* Raise z: only the single pure-z column remains. */
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[2] * p00[n];
                        }
                        p01 += NGv;
                }
        }
}
/*
* transpose i, j when store in out
*/
/*
 * Same recursion step as vrr2d_ket_inc1, but the (i, j) indices are
 * transposed when storing into out: the result is laid out with the
 * original bra index i as the slow dimension (row_01 = _LEN_CART[lj]
 * entries per i).  Used by hrr2d to restore (li, lj) ordering.
 */
static void vrr2d_inc1_swapij(double complex *out, const double complex *g,
                              double *rirj, int li, int lj, size_t NGv)
{
        if (lj == 0) {
                memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*NGv);
                return;
        }
        const int row_01 = _LEN_CART[lj];
        const int row_10 = _LEN_CART[li+1];
        const int row_00 = _LEN_CART[li  ];
        const int col_00 = _LEN_CART[lj-1];
        const double complex *g00 = g;
        const double complex *g10 = g + row_00*col_00*NGv;
        int i, j, n;
        const double complex *p00, *p10;
        double complex *p01 = out;

        /* x-increment columns; out advances one j-slot per column while
         * p01 jumps by i*row_01 rows inside each column (transposed). */
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv;
                        p01 = out + i*row_01 * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[0] * p00[n];
                        }
                }
                out += NGv;
        }
        /* y-increment columns. */
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv;
                        p01 = out + i*row_01 * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[1] * p00[n];
                        }
                }
                out += NGv;
        }
        /* z-increment: single remaining column. */
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv;
                        p01 = out + i*row_01 * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[2] * p00[n];
                        }
                }
        }
}
/* (li+lj,0) => (li,lj) */
/* (li+lj,0) => (li,lj): repeatedly raise the ket from 0 to lj, ping-
 * ponging intermediate levels between g and gbuf2; the final step
 * writes directly into out. */
static void vrr2d(double complex *out, double complex *g,
                  double complex *gbuf2, CINTEnvVars *envs, size_t NGv)
{
        const int li = envs->li_ceil;
        const int lj = envs->lj_ceil;
        const int nmax = li + lj;
        const double *ri = envs->ri;
        const double *rj = envs->rj;
        double complex *g00, *g01, *gswap, *pg00, *pg01;
        int row_01, col_01, row_00, col_00;
        int i, j;
        double rirj[3];
        rirj[0] = ri[0] - rj[0];
        rirj[1] = ri[1] - rj[1];
        rirj[2] = ri[2] - rj[2];

        g00 = gbuf2;
        g01 = g;
        for (j = 1; j < lj; j++) {
                /* Swap buffers: last iteration's output becomes input. */
                gswap = g00;
                g00 = g01;
                g01 = gswap;
                pg00 = g00;
                pg01 = g01;
                /* Each step needs bra levels li..nmax-j for the next one. */
                for (i = li; i <= nmax-j; i++) {
                        vrr2d_ket_inc1(pg01, pg00, rirj, i, j, NGv);
                        row_01 = _LEN_CART[i];
                        col_01 = _LEN_CART[j];
                        row_00 = _LEN_CART[i  ];
                        col_00 = _LEN_CART[j-1];
                        pg00 += row_00*col_00 * NGv;
                        pg01 += row_01*col_01 * NGv;
                }
        }
        vrr2d_ket_inc1(out, g01, rirj, li, lj, NGv);
}
/* (0,li+lj) => (li,lj) */
/* (0,li+lj) => (li,lj): mirror image of vrr2d — raise the bra side
 * using rj-ri, then transpose indices in the final step so out ends up
 * in (li,lj) order. */
static void hrr2d(double complex *out, double complex *g,
                  double complex *gbuf2, CINTEnvVars *envs, size_t NGv)
{
        const int li = envs->li_ceil;
        const int lj = envs->lj_ceil;
        const int nmax = li + lj;
        const double *ri = envs->ri;
        const double *rj = envs->rj;
        double complex *g00, *g01, *gswap, *pg00, *pg01;
        int row_01, col_01, row_00, col_00;
        int i, j;
        double rjri[3];
        rjri[0] = rj[0] - ri[0];
        rjri[1] = rj[1] - ri[1];
        rjri[2] = rj[2] - ri[2];

        g00 = gbuf2;
        g01 = g;
        for (i = 1; i < li; i++) {
                /* Swap buffers: last iteration's output becomes input. */
                gswap = g00;
                g00 = g01;
                g01 = gswap;
                pg00 = g00;
                pg01 = g01;
                for (j = lj; j <= nmax-i; j++) {
                        vrr2d_ket_inc1(pg01, pg00, rjri, j, i, NGv);
                        row_01 = _LEN_CART[j];
                        col_01 = _LEN_CART[i];
                        row_00 = _LEN_CART[j  ];
                        col_00 = _LEN_CART[i-1];
                        pg00 += row_00*col_00 * NGv;
                        pg01 += row_01*col_01 * NGv;
                }
        }
        /* Final step also swaps (i,j) so out is stored as (li,lj). */
        vrr2d_inc1_swapij(out, g01, rjri, lj, li, NGv);
}
/*
* Recursive relation
*/
/* Early-contraction recursion path for li >= lj: evaluate the Gv-space
 * prefactor via eval_gz, then run the 1D vertical recursion relative to
 * center i. */
static void aopair_rr_igtj_early(double complex *g, double ai, double aj,
                                 CINTEnvVars *envs, void (*eval_gz)(),
                                 double complex fac, double *Gv, double *b,
                                 int *gxyz, int *gs, size_t NGv)
{
        const int topl = envs->li_ceil + envs->lj_ceil;
        const double aij = ai + aj;
        const double *ri = envs->ri;
        const double *rj = envs->rj;
        double rij[3], rijri[3];
        int k;

        /* Gaussian product center rij and its offset from center i. */
        for (k = 0; k < 3; k++) {
                rij[k] = (ai * ri[k] + aj * rj[k]) / aij;
                rijri[k] = rij[k] - ri[k];
        }
        (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv);
        vrr1d(g, rijri, aij, Gv, topl, NGv);
}
/* Early-contraction recursion path for li < lj: same as the igtj
 * variant, but the 1D recursion runs relative to center j. */
static void aopair_rr_iltj_early(double complex *g, double ai, double aj,
                                 CINTEnvVars *envs, void (*eval_gz)(),
                                 double complex fac, double *Gv, double *b,
                                 int *gxyz, int *gs, size_t NGv)
{
        const int topl = envs->li_ceil + envs->lj_ceil;
        const double aij = ai + aj;
        const double *ri = envs->ri;
        const double *rj = envs->rj;
        double rij[3], rijrj[3];
        int k;

        /* Gaussian product center rij and its offset from center j. */
        for (k = 0; k < 3; k++) {
                rij[k] = (ai * ri[k] + aj * rj[k]) / aij;
                rijrj[k] = rij[k] - rj[k];
        }
        (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv);
        vrr1d(g, rijrj, aij, Gv, topl, NGv);
}
/* Lazy-contraction recurrence for an i>=j shell pair: fill the full
 * g(i,j) table of x/y/z 1D intermediates for every G vector.
 * Phase 1 (vertical recurrence) raises the total momentum on center i:
 *   g[i+1] = i*a2*g[i-1] + (rij-ri - i*k*a2) * g[i]   (complex shift)
 * Phase 2 (horizontal recurrence) transfers momentum to center j:
 *   g[i,j] = g[i+1,j-1] + (ri-rj) * g[i,j-1]
 * gz[n] == 0 marks a G vector discarded by the cutoff in eval_gz; such
 * entries are skipped throughout.  dj is the stride of the j index. */
static void aopair_rr_igtj_lazy(double complex *g, double ai, double aj,
CINTEnvVars *envs, void (*eval_gz)(),
double complex fac, double *Gv, double *b,
int *gxyz, int *gs, size_t NGv)
{
const int nmax = envs->li_ceil + envs->lj_ceil;
const int lj = envs->lj_ceil;
const int dj = envs->g_stride_j;
const double aij = ai + aj;
const double a2 = .5 / aij;
const double *ri = envs->ri;
const double *rj = envs->rj;
double rij[3], rirj[3], rijri[3];
/* x, y and z tables are g_size*NGv apart */
double complex *gx = g;
double complex *gy = gx + envs->g_size * NGv;
double complex *gz = gy + envs->g_size * NGv;
/* G-vector components, stored axis-major */
double *kx = Gv;
double *ky = kx + NGv;
double *kz = ky + NGv;
size_t off0, off1, off2;
int i, j, n, ptr;
double ia2;
rirj[0] = ri[0] - rj[0];
rirj[1] = ri[1] - rj[1];
rirj[2] = ri[2] - rj[2];
/* exponent-weighted product center and its offset from ri */
rij[0] = (ai * ri[0] + aj * rj[0]) / aij;
rij[1] = (ai * ri[1] + aj * rj[1]) / aij;
rij[2] = (ai * ri[2] + aj * rj[2]) / aij;
rijri[0] = rij[0] - ri[0];
rijri[1] = rij[1] - ri[1];
rijri[2] = rij[2] - ri[2];
/* seed: gx = gy = 1; the full Gaussian prefactor lives in gz */
for (n = 0; n < NGv; n++) {
gx[n] = 1;
gy[n] = 1;
}
(*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv);
/* first vertical step (i = 0 -> 1) */
if (nmax > 0) {
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[NGv+n] = (rijri[0] - kx[n]*a2*_Complex_I) * gx[n];
gy[NGv+n] = (rijri[1] - ky[n]*a2*_Complex_I) * gy[n];
gz[NGv+n] = (rijri[2] - kz[n]*a2*_Complex_I) * gz[n];
}
}
}
/* remaining vertical steps up to total momentum nmax */
for (i = 1; i < nmax; i++) {
off0 = (i-1) * NGv;
off1 = i * NGv;
off2 = (i+1) * NGv;
ia2 = i * a2;
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off2+n] = ia2 * gx[off0+n] + (rijri[0] - kx[n]*a2*_Complex_I) * gx[off1+n];
gy[off2+n] = ia2 * gy[off0+n] + (rijri[1] - ky[n]*a2*_Complex_I) * gy[off1+n];
gz[off2+n] = ia2 * gz[off0+n] + (rijri[2] - kz[n]*a2*_Complex_I) * gz[off1+n];
}
}
}
/* horizontal steps: build columns j = 1..lj from column j-1 */
for (j = 1; j <= lj; j++) {
ptr = dj * j;
for (i = ptr; i <= ptr + nmax - j; i++) {
off0 = i * NGv - dj * NGv; // [i, j-1]
off1 = (i+1) * NGv - dj * NGv; // [i+1,j-1]
off2 = i * NGv; // [i, j ]
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n];
gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n];
gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n];
}
}
}
}
}
/* Lazy-contraction recurrence for an i<j shell pair.  Same scheme as
 * aopair_rr_igtj_lazy but with the roles of the centers exchanged: the
 * vertical recurrence raises momentum on center j (stride dj, offset
 * rij-rj), and the horizontal recurrence then transfers momentum to i
 * using rj-ri (note: the local rirj[] here actually holds rj-ri).
 * gz[n] == 0 again marks a cutoff-discarded G vector. */
static void aopair_rr_iltj_lazy(double complex *g, double ai, double aj,
CINTEnvVars *envs, void (*eval_gz)(),
double complex fac, double *Gv, double *b,
int *gxyz, int *gs, size_t NGv)
{
const int nmax = envs->li_ceil + envs->lj_ceil;
const int li = envs->li_ceil;
const int dj = envs->g_stride_j;
const double aij = ai + aj;
const double a2 = .5 / aij;
const double *ri = envs->ri;
const double *rj = envs->rj;
double rij[3], rirj[3], rijrj[3];
/* x, y and z tables are g_size*NGv apart */
double complex *gx = g;
double complex *gy = gx + envs->g_size * NGv;
double complex *gz = gy + envs->g_size * NGv;
double *kx = Gv;
double *ky = kx + NGv;
double *kz = ky + NGv;
size_t off0, off1, off2;
int i, j, n;
double ia2;
/* despite the name, this is rj - ri in this variant */
rirj[0] = rj[0] - ri[0];
rirj[1] = rj[1] - ri[1];
rirj[2] = rj[2] - ri[2];
rij[0] = (ai * ri[0] + aj * rj[0]) / aij;
rij[1] = (ai * ri[1] + aj * rj[1]) / aij;
rij[2] = (ai * ri[2] + aj * rj[2]) / aij;
rijrj[0] = rij[0] - rj[0];
rijrj[1] = rij[1] - rj[1];
rijrj[2] = rij[2] - rj[2];
/* seed: gx = gy = 1; the full Gaussian prefactor lives in gz */
for (n = 0; n < NGv; n++) {
gx[n] = 1;
gy[n] = 1;
}
(*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv);
/* first vertical step on center j (stride dj) */
if (nmax > 0) {
off0 = dj * NGv;
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off0+n] = (rijrj[0] - kx[n]*a2*_Complex_I) * gx[n];
gy[off0+n] = (rijrj[1] - ky[n]*a2*_Complex_I) * gy[n];
gz[off0+n] = (rijrj[2] - kz[n]*a2*_Complex_I) * gz[n];
}
}
}
/* remaining vertical steps along the j axis */
for (i = 1; i < nmax; i++) {
off0 = (i-1) * dj * NGv;
off1 = i * dj * NGv;
off2 = (i+1) * dj * NGv;
ia2 = i * a2;
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off2+n] = ia2 * gx[off0+n] + (rijrj[0] - kx[n]*a2*_Complex_I) * gx[off1+n];
gy[off2+n] = ia2 * gy[off0+n] + (rijrj[1] - ky[n]*a2*_Complex_I) * gy[off1+n];
gz[off2+n] = ia2 * gz[off0+n] + (rijrj[2] - kz[n]*a2*_Complex_I) * gz[off1+n];
}
}
}
/* horizontal steps: transfer momentum to center i */
for (i = 1; i <= li; i++) {
for (j = 0; j <= nmax - i; j++) {
off0 = (i-1) * NGv + j * dj * NGv; // [i-1,j ]
off1 = (i-1) * NGv + (j+1) * dj * NGv; // [i-1,j+1]
off2 = i * NGv + j * dj * NGv; // [i ,j ]
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n];
gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n];
gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n];
}
}
}
}
}
/* Combine the x/y/z 1D tables into per-function integrals:
 * gout[n,k] = gx[ix,k] * gy[iy,k] * gz[iz,k] for each of the envs->nf
 * Cartesian components, k running over G vectors.  idx gives the
 * (ix,iy,iz) table offsets of each component.  gz[k] == 0 marks a G
 * vector discarded by the cutoff: the overwrite branch zeroes those
 * entries, the accumulate branch leaves them untouched.
 * empty != 0 : overwrite gout;  empty == 0 : accumulate into gout. */
static void inner_prod(double complex *g, double complex *gout,
int *idx, const CINTEnvVars *envs,
double *Gv, size_t NGv, int empty)
{
int ix, iy, iz, n, k;
/* gz is the third axis block; blocks are g_size*NGv apart */
double complex *gz = g + envs->g_size * NGv * 2;
if (empty) {
for (n = 0; n < envs->nf; n++) {
ix = idx[n*3+0];
iy = idx[n*3+1];
iz = idx[n*3+2];
for (k = 0; k < NGv; k++) {
if (gz[k] != 0) {
gout[n*NGv+k] = g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k];
} else {
gout[n*NGv+k] = 0;
}
}
}
} else {
for (n = 0; n < envs->nf; n++) {
ix = idx[n*3+0];
iy = idx[n*3+1];
iz = idx[n*3+2];
for (k = 0; k < NGv; k++) {
if (gz[k] != 0) {
gout[n*NGv+k] += g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k];
}
}
}
}
}
/*
 * Fold one primitive block gp (nf complex values) into the contracted
 * buffer gc for each of the nctr contractions.  coeff is the
 * column-major (nprim x nctr) coefficient matrix, so the coefficient of
 * contraction k lives at coeff[nprim*k].  With empty nonzero gc is
 * overwritten; otherwise gp*c is accumulated, skipping zero
 * coefficients.
 */
static void prim_to_ctr(double complex *gc, const size_t nf, double complex *gp,
                        const int nprim, const int nctr, const double *coeff,
                        int empty)
{
        int k;
        size_t m;
        for (k = 0; k < nctr; k++) {
                const double c = coeff[(size_t)nprim * k];
                double complex *dst = gc + (size_t)k * nf;
                if (empty) {
                        for (m = 0; m < nf; m++) {
                                dst[m] = gp[m] * c;
                        }
                } else if (c != 0) {
                        for (m = 0; m < nf; m++) {
                                dst[m] += gp[m] * c;
                        }
                }
        }
}
/*
 * Reorder in[nf][comp][NGv] into out[comp][nf][NGv]: the component axis
 * moves in front of the function axis while the G-vector axis stays
 * contiguous and innermost.
 */
static void transpose(double complex *out, double complex *in,
                      int nf, int comp, size_t NGv)
{
        size_t ic, n, k;
        for (ic = 0; ic < comp; ic++) {
                double complex *dst = out + ic * nf * NGv;
                for (n = 0; n < nf; n++) {
                        const double complex *src = in + (n * comp + ic) * NGv;
                        for (k = 0; k < NGv; k++) {
                                dst[n*NGv + k] = src[k];
                        }
                }
        }
}
/* Scratch sizes (complex elements per G vector) used by
 * GTO_aopair_early_contract for an (i,j) angular-momentum pair, indexed
 * by the triangular index computed in bufsize() below.  Presumably the
 * combined size of the vrr/hrr intermediate buffers -- confirm against
 * vrr2d/hrr2d if extending beyond l = 7. */
static const int _GBUFSIZE[] = {
1, 4, 10, 10, 20, 48, 20, 35, 75, 150, 35, 56, 108, 216, 384,
56, 84, 147, 294, 510, 850, 84, 120, 192, 384, 654, 1090, 1640,
120, 165, 243, 486, 816, 1360, 2040, 3030
};
/* symmetric lookup: order (i,j) so the larger momentum picks the row */
#define bufsize(i,j) _GBUFSIZE[((i>=j) ? (i*(i+1)/2+j) : (j*(j+1)/2+i))]
/* Fourier transform of an AO pair, "early contraction" path: the
 * primitive->contracted transformation is applied to the compact 1D
 * intermediates (len_g1d values per G vector) before the final 2D
 * recurrence, which is cheaper when i_prim*j_prim is large relative to
 * i_ctr*j_ctr.  Writes nf*NGv complex values per contraction pair into
 * out.  Returns nonzero when at least one primitive pair survived the
 * exponential cutoff (i.e. out contains data). */
int GTO_aopair_early_contract(double complex *out, CINTEnvVars *envs,
void (*eval_gz)(), double complex fac,
double *Gv, double *b, int *gxyz, int *gs, size_t NGv)
{
const int *shls = envs->shls;
const int *bas = envs->bas;
const double *env = envs->env;
const int i_sh = shls[0];
const int j_sh = shls[1];
const int i_l = envs->i_l;
const int j_l = envs->j_l;
const int i_ctr = envs->x_ctr[0];
const int j_ctr = envs->x_ctr[1];
const int i_prim = bas(NPRIM_OF, i_sh);
const int j_prim = bas(NPRIM_OF, j_sh);
const int nf = envs->nf;
const double *ri = envs->ri;
const double *rj = envs->rj;
/* primitive exponents and contraction coefficients of both shells */
const double *ai = env + bas(PTR_EXP, i_sh);
const double *aj = env + bas(PTR_EXP, j_sh);
const double *ci = env + bas(PTR_COEFF, i_sh);
const double *cj = env + bas(PTR_COEFF, j_sh);
double fac1i, fac1j;
double aij, dij, eij;
int ip, jp, n;
/* "empty" flags select overwrite vs accumulate in prim_to_ctr; when
 * j_ctr == 1 the i and j levels share one flag/buffer */
int empty[2] = {1, 1};
int *jempty = empty + 0;
int *iempty = empty + 1;
const size_t len1 = bufsize(i_l,j_l) * NGv;
const size_t leni = len1 * i_ctr;
const size_t lenj = len1 * i_ctr * j_ctr;
/* NOTE(review): malloc result is not checked before use */
double complex *gctrj = malloc(sizeof(double complex)*(lenj+leni+len1));
double complex *g = gctrj + lenj;
double complex *gctri, *g1d;
if (j_ctr == 1) {
gctri = gctrj;
iempty = jempty;
} else {
gctri = g;
g += leni;
}
g1d = g;
/* pick the recurrence variant by which center carries more momentum;
 * offset_g1d skips the lower-l part of the 1D table not needed here */
void (*aopair_rr)();
int offset_g1d;
if (i_l >= j_l) {
aopair_rr = aopair_rr_igtj_early;
offset_g1d = _CUM_LEN_CART[i_l] - _LEN_CART[i_l];
} else {
aopair_rr = aopair_rr_iltj_early;
offset_g1d = _CUM_LEN_CART[j_l] - _LEN_CART[j_l];
}
int len_g1d = _CUM_LEN_CART[i_l+j_l] - offset_g1d;
double rrij = CINTsquare_dist(ri, rj);
double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l);
*jempty = 1;
/* double loop over primitive pairs */
for (jp = 0; jp < j_prim; jp++) {
if (j_ctr == 1) {
fac1j = fac1 * cj[jp];
} else {
fac1j = fac1;
*iempty = 1;
}
for (ip = 0; ip < i_prim; ip++) {
aij = ai[ip] + aj[jp];
eij = (ai[ip] * aj[jp] / aij) * rrij;
/* skip primitive pairs whose Gaussian overlap underflows */
if (eij > EXPCUTOFF) {
continue;
}
dij = exp(-eij) / (aij * sqrt(aij));
fac1i = fac1j * dij;
(*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz,
fac*fac1i, Gv, b, gxyz, gs, NGv);
/* contract over i primitives immediately (early contraction) */
prim_to_ctr(gctri, len_g1d*NGv, g1d+offset_g1d*NGv,
i_prim, i_ctr, ci+ip, *iempty);
*iempty = 0;
}
if (!*iempty) {
if (j_ctr > 1) {
prim_to_ctr(gctrj, i_ctr*len_g1d*NGv, gctri,
j_prim,j_ctr, cj+jp, *jempty);
}
*jempty = 0;
}
}
/* expand the contracted 1D intermediates to the (i_l,j_l) block */
if (!*jempty) {
g1d = gctrj;
for (n = 0; n < i_ctr*j_ctr; n++) {
if (i_l >= j_l) {
vrr2d(out+n*nf*NGv, g1d, gctrj+lenj, envs, NGv);
} else {
hrr2d(out+n*nf*NGv, g1d, gctrj+lenj, envs, NGv);
}
g1d += len_g1d * NGv;
}
}
free(gctrj);
return !*jempty;
}
/* Fourier transform of an AO pair, "lazy contraction" path: the full
 * g(i,j) recurrence table is built per primitive pair and assembled by
 * envs->f_gout before contraction -- cheaper when the primitive count
 * is small.  Writes nf*i_ctr*j_ctr*n_comp*NGv complex values to gctr.
 * Returns nonzero when at least one primitive pair survived the
 * exponential cutoff. */
int GTO_aopair_lazy_contract(double complex *gctr, CINTEnvVars *envs,
void (*eval_gz)(), double complex fac,
double *Gv, double *b, int *gxyz, int *gs, size_t NGv)
{
const int *shls = envs->shls;
const int *bas = envs->bas;
const double *env = envs->env;
const int i_sh = shls[0];
const int j_sh = shls[1];
const int i_l = envs->i_l;
const int j_l = envs->j_l;
const int i_ctr = envs->x_ctr[0];
const int j_ctr = envs->x_ctr[1];
const int i_prim = bas(NPRIM_OF, i_sh);
const int j_prim = bas(NPRIM_OF, j_sh);
const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor;
const int nf = envs->nf;
const double *ri = envs->ri;
const double *rj = envs->rj;
/* primitive exponents and contraction coefficients of both shells */
const double *ai = env + bas(PTR_EXP, i_sh);
const double *aj = env + bas(PTR_EXP, j_sh);
const double *ci = env + bas(PTR_COEFF, i_sh);
const double *cj = env + bas(PTR_COEFF, j_sh);
double fac1i, fac1j;
double aij, dij, eij;
int ip, jp;
/* overwrite/accumulate flags for the three contraction levels; levels
 * with a single contraction share the flag of the level above */
int empty[3] = {1, 1, 1};
int *jempty = empty + 0;
int *iempty = empty + 1;
int *gempty = empty + 2;
const size_t len1 = envs->g_size * 3 * (1<<envs->gbits) * NGv;
const size_t leng = nf * n_comp * NGv;
const size_t leni = nf * i_ctr * n_comp * NGv;
size_t lenj = 0;
if (n_comp > 1) {
lenj = nf * i_ctr * j_ctr * n_comp * NGv;
}
/* NOTE(review): malloc result is not checked before use */
double complex *g = malloc(sizeof(double complex) * (len1+leng+leni+lenj));
double complex *g1 = g + len1;
double complex *gout, *gctri, *gctrj;
/* alias scratch buffers to the output when no extra stage is needed */
if (n_comp == 1) {
gctrj = gctr;
} else {
gctrj = g1;
g1 += lenj;
}
if (j_ctr == 1) {
gctri = gctrj;
iempty = jempty;
} else {
gctri = g1;
g1 += leni;
}
if (i_ctr == 1) {
gout = gctri;
gempty = iempty;
} else {
gout = g1;
}
/* pick the recurrence variant by which center carries more momentum */
void (*aopair_rr)();
if (i_l >= j_l) {
aopair_rr = aopair_rr_igtj_lazy;
} else {
aopair_rr = aopair_rr_iltj_lazy;
}
int *idx = malloc(sizeof(int) * nf * 3);
CINTg1e_index_xyz(idx, envs);
double rrij = CINTsquare_dist(ri, rj);
double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l);
*jempty = 1;
/* double loop over primitive pairs */
for (jp = 0; jp < j_prim; jp++) {
envs->aj = aj[jp];
if (j_ctr == 1) {
fac1j = fac1 * cj[jp];
} else {
fac1j = fac1;
*iempty = 1;
}
for (ip = 0; ip < i_prim; ip++) {
envs->ai = ai[ip];
aij = ai[ip] + aj[jp];
eij = (ai[ip] * aj[jp] / aij) * rrij;
/* skip primitive pairs whose Gaussian overlap underflows */
if (eij > EXPCUTOFF) {
continue;
}
dij = exp(-eij) / (aij * sqrt(aij));
if (i_ctr == 1) {
fac1i = fac1j * dij * ci[ip];
} else {
fac1i = fac1j * dij;
}
(*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz,
fac*fac1i, Gv, b, gxyz, gs, NGv);
/* assemble x/y/z tables into per-function values */
(*envs->f_gout)(g, gout, idx, envs, Gv, NGv, *gempty);
if (i_ctr > 1) {
prim_to_ctr(gctri, nf*n_comp*NGv, gout,
i_prim, i_ctr, ci+ip, *iempty);
}
*iempty = 0;
}
if (!*iempty) {
if (j_ctr > 1) {
prim_to_ctr(gctrj, i_ctr*nf*n_comp*NGv, gctri,
j_prim, j_ctr, cj+jp, *jempty);
}
*jempty = 0;
}
}
/* move the component axis in front of the function axis */
if (n_comp > 1 && !*jempty) {
transpose(gctr, gctrj, nf*i_ctr*j_ctr, n_comp, NGv);
}
free(g);
free(idx);
return !*jempty;
}
/* Reference evaluator of the G-space Gaussian factor for arbitrary G:
 *   out[n] = fac * exp(-|G_n|^2 / (4 aij)) * exp(-i G_n . rij)
 * G vectors whose |G|^2 exceeds the exponential cutoff produce 0,
 * which downstream kernels interpret as "skip this G vector". */
void GTO_Gv_general(double complex *out, double aij, double *rij,
double complex fac, double *Gv, double *b,
int *gxyz, int *gs, size_t NGv)
{
/* G components stored axis-major: all x, then all y, then all z */
double *kx = Gv;
double *ky = kx + NGv;
double *kz = ky + NGv;
const double cutoff = EXPCUTOFF * aij * 4;
int n;
double kR, kk;
for (n = 0; n < NGv; n++) {
kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n];
if (kk < cutoff) {
kR = kx[n] * rij[0] + ky[n] * rij[1] + kz[n] * rij[2];
/* cos - i sin == exp(-i kR) */
out[n] = exp(-.25*kk/aij) * fac * (cos(kR) - sin(kR)*_Complex_I);
} else {
out[n] = 0;
}
}
}
/*
* Gv = dot(b.T,gxyz) + kpt
* kk = dot(Gv, Gv)
* kr = dot(rij, Gv) = dot(rij,b.T, gxyz) + dot(rij,kpt) = dot(br, gxyz) + dot(rij,kpt)
* out = fac * exp(-.25 * kk / aij) * (cos(kr) - sin(kr) * _Complex_I);
*
* b: the first 9 elements are 2\pi*inv(a^T), then 3 elements for k_{ij},
* followed by 3*NGv floats for Gbase
*/
/*
 * G-space Gaussian factor on a uniform grid of an ORTHOGONAL cell:
 *   out[n] = fac * exp(-|G|^2/(4 aij)) * exp(-i G.rij)
 * For an orthogonal lattice b is diagonal (b[0], b[4], b[8]), so both
 * the Gaussian and the phase factorize into independent x/y/z terms,
 * which are cached per 1D grid index (kk* < 0 marks "not yet computed").
 * Layout of b: 9 lattice entries, 3 k-point components, then nx+ny+nz
 * Gbase coordinates (see the block comment above GTO_Gv_orth's callers).
 * gxyz holds the per-axis integer grid index of each of the NGv vectors.
 * Entries beyond the exponential cutoff are set to 0 (skip marker).
 * Change vs. original: removed the unused local variable `kk`.
 */
void GTO_Gv_orth(double complex *out, double aij, double *rij,
                 double complex fac, double *Gv, double *b,
                 int *gxyz, int *gs, size_t NGv)
{
        const int nx = gs[0];
        const int ny = gs[1];
        const int nz = gs[2];
        double br[3];  // dot(rij, b); diagonal lattice => 3 products
        br[0] = rij[0] * b[0];
        br[1] = rij[1] * b[4];
        br[2] = rij[2] * b[8];
        double *kpt = b + 9;
        double kr[3];  // rij . kpt, added to each axis phase
        kr[0] = rij[0] * kpt[0];
        kr[1] = rij[1] * kpt[1];
        kr[2] = rij[2] * kpt[2];
        double *Gxbase = b + 12;
        double *Gybase = Gxbase + nx;
        double *Gzbase = Gybase + ny;
        double *kx = Gv;
        double *ky = kx + NGv;
        double *kz = ky + NGv;
        // per-axis caches of the 1D factors (VLAs sized by the grid)
        double complex zbuf[nx+ny+nz];
        double complex *csx = zbuf;
        double complex *csy = csx + nx;
        double complex *csz = csy + ny;
        double kkpool[nx+ny+nz];
        double *kkx = kkpool;
        double *kky = kkx + nx;
        double *kkz = kky + ny;
        int *gx = gxyz;
        int *gy = gx + NGv;
        int *gz = gy + NGv;
        const double cutoff = EXPCUTOFF * aij * 4;
        int n, ix, iy, iz;
        double Gr;
        for (n = 0; n < nx+ny+nz; n++) {
                kkpool[n] = -1;  // mark every cache slot uncomputed
        }
        for (n = 0; n < NGv; n++) {
                ix = gx[n];
                iy = gy[n];
                iz = gz[n];
                if (kkx[ix] < 0) {
                        Gr = Gxbase[ix] * br[0] + kr[0];
                        kkx[ix] = .25 * kx[n]*kx[n] / aij;
                        csx[ix] = exp(-kkx[ix]) * (cos(Gr)-sin(Gr)*_Complex_I);
                }
                if (kky[iy] < 0) {
                        Gr = Gybase[iy] * br[1] + kr[1];
                        kky[iy] = .25 * ky[n]*ky[n] / aij;
                        csy[iy] = exp(-kky[iy]) * (cos(Gr)-sin(Gr)*_Complex_I);
                }
                if (kkz[iz] < 0) {
                        Gr = Gzbase[iz] * br[2] + kr[2];
                        kkz[iz] = .25 * kz[n]*kz[n] / aij;
                        // fac is folded into the z factor only
                        csz[iz] = fac * exp(-kkz[iz]) * (cos(Gr)-sin(Gr)*_Complex_I);
                }
                if (kkx[ix] + kky[iy] + kkz[iz] < cutoff) {
                        out[n] = csx[ix] * csy[iy] * csz[iz];
                } else {
                        out[n] = 0;
                }
        }
}
/*
 * G-space Gaussian factor on a uniform grid of a NON-ORTHOGONAL cell.
 * The Gaussian exp(-|G|^2/(4 aij)) does not factorize here, but the
 * phase exp(-i G.rij) still splits per reciprocal axis, so the three
 * 1D phase factors are cached per grid index (empty[] flags).
 * Layout of b: 9 lattice entries (full 3x3), 3 k-point components,
 * then nx+ny+nz Gbase coordinates; gxyz holds per-axis grid indices.
 * Entries beyond the exponential cutoff are set to 0 (skip marker).
 * Change vs. original: the ix/iy/iz indices were loaded twice per
 * iteration (once before and once inside the cutoff branch); the dead
 * duplicate loads are removed.
 */
void GTO_Gv_nonorth(double complex *out, double aij, double *rij,
                    double complex fac, double *Gv, double *b,
                    int *gxyz, int *gs, size_t NGv)
{
        const int nx = gs[0];
        const int ny = gs[1];
        const int nz = gs[2];
        double br[3];  // dot(rij, b) with the full lattice matrix
        br[0] = rij[0] * b[0];
        br[0] += rij[1] * b[1];
        br[0] += rij[2] * b[2];
        br[1] = rij[0] * b[3];
        br[1] += rij[1] * b[4];
        br[1] += rij[2] * b[5];
        br[2] = rij[0] * b[6];
        br[2] += rij[1] * b[7];
        br[2] += rij[2] * b[8];
        double *kpt = b + 9;
        double kr[3];  // rij . kpt per axis
        kr[0] = rij[0] * kpt[0];
        kr[1] = rij[1] * kpt[1];
        kr[2] = rij[2] * kpt[2];
        double *Gxbase = b + 12;
        double *Gybase = Gxbase + nx;
        double *Gzbase = Gybase + ny;
        double *kx = Gv;
        double *ky = kx + NGv;
        double *kz = ky + NGv;
        // per-axis phase caches; empty[] marks uncomputed slots
        double complex zbuf[nx+ny+nz];
        double complex *csx = zbuf;
        double complex *csy = csx + nx;
        double complex *csz = csy + ny;
        char empty[nx+ny+nz];
        char *xempty = empty;
        char *yempty = xempty + nx;
        char *zempty = yempty + ny;
        memset(empty, 1, sizeof(char)*(nx+ny+nz));
        int *gx = gxyz;
        int *gy = gx + NGv;
        int *gz = gy + NGv;
        const double cutoff = EXPCUTOFF * aij * 4;
        int n, ix, iy, iz;
        double Gr, kk;
        for (n = 0; n < NGv; n++) {
                ix = gx[n];
                iy = gy[n];
                iz = gz[n];
                kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n];
                if (kk < cutoff) {
                        if (xempty[ix]) {
                                Gr = Gxbase[ix] * br[0] + kr[0];
                                csx[ix] = cos(Gr)-sin(Gr)*_Complex_I;
                                xempty[ix] = 0;
                        }
                        if (yempty[iy]) {
                                Gr = Gybase[iy] * br[1] + kr[1];
                                csy[iy] = cos(Gr)-sin(Gr)*_Complex_I;
                                yempty[iy] = 0;
                        }
                        if (zempty[iz]) {
                                Gr = Gzbase[iz] * br[2] + kr[2];
                                // fac is folded into the z factor only
                                csz[iz] = fac * (cos(Gr)-sin(Gr)*_Complex_I);
                                zempty[iz] = 0;
                        }
                        out[n] = exp(-.25*kk/aij) * csx[ix]*csy[iy]*csz[iz];
                } else {
                        out[n] = 0;
                }
        }
}
/*
 * Copy an (mi x mj) sub-block of complex values (each entry a run of
 * NGv numbers) into a destination matrix whose leading dimension is ni.
 * Source rows are packed back to back with stride mi*NGv.
 */
static void zcopy_ij(double complex *out, const double complex *gctr,
                     const int mi, const int mj, const int ni, const size_t NGv)
{
        int j, i;
        size_t k;
        for (j = 0; j < mj; j++) {
                double complex *po = out + (size_t)j * ni * NGv;
                const double complex *pg = gctr + (size_t)j * mi * NGv;
                for (i = 0; i < mi; i++) {
                        for (k = 0; k < NGv; k++) {
                                po[i*NGv + k] = pg[i*NGv + k];
                        }
                }
        }
}
/* Scatter the contracted Cartesian pair blocks in gctr into the output
 * matrix out.  No spherical transformation: each (nfi x nfj) block is
 * copied verbatim; dims[0] is the leading dimension of out. */
void GTO_ft_c2s_cart(double complex *out, double complex *gctr,
int *dims, CINTEnvVars *envs, size_t NGv)
{
const int i_ctr = envs->x_ctr[0];
const int j_ctr = envs->x_ctr[1];
const int nfi = envs->nfi;
const int nfj = envs->nfj;
const int ni = nfi*i_ctr;
const int nj = nfj*j_ctr;
const int nf = envs->nf;
int ic, jc;
double complex *pout;
/* one (nfi x nfj) block per contraction pair, column-major placement */
for (jc = 0; jc < nj; jc += nfj) {
for (ic = 0; ic < ni; ic += nfi) {
pout = out + (dims[0] * jc + ic) * NGv;
zcopy_ij(pout, gctr, nfi, nfj, dims[0], NGv);
gctr += nf * NGv;
} }
}
/* Wrap the real-valued Cartesian->spherical transform for complex data:
 * the complex arrays are reinterpreted as interleaved doubles, doubling
 * the ket count (OF_CMPLX). */
#define C2S(sph, nket, cart, l) \
(double complex *)CINTc2s_ket_sph((double *)(sph), nket, (double *)(cart), l)
#define OF_CMPLX 2
/* Transform the contracted Cartesian pair blocks in gctr to real
 * spherical harmonics and scatter them into out (leading dimension
 * dims[0]).  The j index is transformed first (on nfi*NGv kets), then
 * the i index column by column. */
void GTO_ft_c2s_sph(double complex *out, double complex *gctr,
int *dims, CINTEnvVars *envs, size_t NGv)
{
const int i_l = envs->i_l;
const int j_l = envs->j_l;
const int i_ctr = envs->x_ctr[0];
const int j_ctr = envs->x_ctr[1];
/* spherical degeneracies 2l+1 */
const int di = i_l * 2 + 1;
const int dj = j_l * 2 + 1;
const int ni = di*i_ctr;
const int nj = dj*j_ctr;
const int nfi = envs->nfi;
const int nf = envs->nf;
int ic, jc, k;
const int buflen = nfi*dj;
/* NOTE(review): malloc result is not checked before use */
double complex *buf1 = malloc(sizeof(double complex) * buflen*2 * NGv);
double complex *buf2 = buf1 + buflen * NGv;
double complex *pout, *pij, *buf;
for (jc = 0; jc < nj; jc += dj) {
for (ic = 0; ic < ni; ic += di) {
/* transform the j (ket) index of the whole block at once */
buf = C2S(buf1, nfi*NGv*OF_CMPLX, gctr, j_l);
/* transform the i index for each of the dj columns */
pij = C2S(buf2, NGv*OF_CMPLX, buf, i_l);
for (k = NGv; k < dj*NGv; k+=NGv) {
pout = C2S(buf2+k*di, NGv*OF_CMPLX, buf+k*nfi, i_l);
}
pout = out + (dims[0] * jc + ic) * NGv;
zcopy_ij(pout, pij, di, dj, dims[0], NGv);
gctr += nf * NGv;
} }
free(buf1);
}
/*
 * Zero the counts[0] x counts[1] sub-matrix (NGv complex values per
 * entry) of each of the comp components of out.  Components are spaced
 * dims[0]*dims[1]*NGv apart.  NOTE(review): rows within one component
 * are addressed with stride counts[0]*NGv, which only differs from the
 * matrix leading dimension when dims != counts -- confirm intent.
 */
static void _ft_zset0(double complex *out, int *dims, int *counts,
                      int comp, size_t NGv)
{
        int ic, j;
        const size_t row = (size_t)counts[0] * NGv;
        for (ic = 0; ic < comp; ic++) {
                for (j = 0; j < counts[1]; j++) {
                        memset(out + j * row, 0, row * sizeof(double complex));
                }
                out += (size_t)dims[0] * dims[1] * NGv;
        }
}
/*************************************************
*
* eval_aopair is one of GTO_aopair_early_contract,
* GTO_aopair_lazy_contract
*
* eval_gz is one of GTO_Gv_general, GTO_Gv_uniform_orth,
* GTO_Gv_uniform_nonorth, GTO_Gv_nonuniform_orth
*
*************************************************/
/*
 * Driver for the Fourier transform of one AO shell pair: pick default
 * evaluators, run the pair evaluation into a scratch buffer, then
 * scatter/transform into out via f_c2s.  Returns nonzero when any
 * primitive pair contributed; otherwise the output block is zeroed.
 * Fix vs. original: the function-pointer references had been mangled by
 * HTML-entity decoding ("&GTO_..." -> ">O_..."); restored, so the
 * comparison against GTO_ft_c2s_sph compiles again.
 */
int GTO_ft_aopair_drv(double complex *out, int *dims,
                      int (*eval_aopair)(), void (*eval_gz)(), void (*f_c2s)(),
                      double complex fac, double *Gv, double *b, int *gxyz,
                      int *gs, size_t NGv, CINTEnvVars *envs)
{
        const int i_ctr = envs->x_ctr[0];
        const int j_ctr = envs->x_ctr[1];
        const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor;
        const size_t nc = envs->nf * i_ctr * j_ctr * NGv;
        double complex *gctr = malloc(sizeof(double complex) * nc * n_comp);
        if (eval_gz == NULL) {
                eval_gz = GTO_Gv_general;
        }
        if (eval_gz != GTO_Gv_general) {
                /* grid-based evaluators need the integer grid indices */
                assert(gxyz != NULL);
        }
        if (eval_aopair == NULL) {
                /* heuristic: lazy contraction wins for few primitives */
                const int *shls = envs->shls;
                const int *bas = envs->bas;
                const int i_sh = shls[0];
                const int j_sh = shls[1];
                const int i_prim = bas(NPRIM_OF, i_sh);
                const int j_prim = bas(NPRIM_OF, j_sh);
                if (i_prim*j_prim < i_ctr*j_ctr*3) {
                        eval_aopair = GTO_aopair_lazy_contract;
                } else {
                        eval_aopair = GTO_aopair_early_contract;
                }
        }
        int has_value = (*eval_aopair)(gctr, envs, eval_gz,
                                       fac, Gv, b, gxyz, gs, NGv);
        int counts[4];
        if (f_c2s == &GTO_ft_c2s_sph) {
                counts[0] = (envs->i_l*2+1) * i_ctr;
                counts[1] = (envs->j_l*2+1) * j_ctr;
        } else { // f_c2s == &GTO_ft_c2s_cart
                counts[0] = envs->nfi * i_ctr;
                counts[1] = envs->nfj * j_ctr;
        }
        if (dims == NULL) {
                dims = counts;
        }
        size_t nout = dims[0] * dims[1] * NGv;
        int n;
        if (has_value) {
                for (n = 0; n < n_comp; n++) {
                        (*f_c2s)(out+nout*n, gctr+nc*n, dims, envs, NGv);
                }
        } else {
                _ft_zset0(out, dims, counts, n_comp, NGv);
        }
        free(gctr);
        return has_value;
}
int GTO_ft_ovlp_cart(double complex *out, int *shls, int *dims,
int (*eval_aopair)(), void (*eval_gz)(), double complex fac,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
CINTEnvVars envs;
int ng[] = {0, 0, 0, 0, 0, 1, 0, 1};
GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env);
envs.f_gout = &inner_prod;
return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, >O_ft_c2s_cart,
fac, Gv, b, gxyz, gs, nGv, &envs);
}
int GTO_ft_ovlp_sph(double complex *out, int *shls, int *dims,
int (*eval_aopair)(), void (*eval_gz)(), double complex fac,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
CINTEnvVars envs;
int ng[] = {0, 0, 0, 0, 0, 1, 0, 1};
GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env);
envs.f_gout = &inner_prod;
return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, >O_ft_c2s_sph,
fac, Gv, b, gxyz, gs, nGv, &envs);
}
/*************************************************
*
*************************************************/
/*
 * Scatter a (di x dj) block into the packed lower-triangular (s2)
 * matrix for the i > j case.  Row i of the block begins after
 * i*(ip+1) + i*(i-1)/2 packed elements (rows of the triangle grow by
 * one); nij is the triangle size per component.
 */
static void zcopy_s2_igtj(double complex *out, double complex *in, size_t NGv,
                          int comp, int nij, int ip, int di, int dj)
{
        const size_t ip1 = ip + 1;
        int ic, i, j;
        size_t n;
        for (ic = 0; ic < comp; ic++) {
                for (i = 0; i < di; i++) {
                        /* closed-form start of packed row i */
                        double complex *dst = out
                                + ((size_t)ic * nij + (size_t)i * ip1
                                   + (size_t)(i*(i-1)/2)) * NGv;
                        for (j = 0; j < dj; j++) {
                                const double complex *src = in + NGv * (size_t)(j*di+i);
                                for (n = 0; n < NGv; n++) {
                                        dst[j*NGv+n] = src[n];
                                }
                        }
                }
        }
}
/*
 * Scatter a (di x dj) block into the packed lower-triangular (s2)
 * matrix for the diagonal i == j case: only entries with j <= i are
 * stored.  Row placement matches zcopy_s2_igtj.
 */
static void zcopy_s2_ieqj(double complex *out, double complex *in, size_t NGv,
                          int comp, int nij, int ip, int di, int dj)
{
        const size_t ip1 = ip + 1;
        int ic, i, j;
        size_t n;
        for (ic = 0; ic < comp; ic++) {
                for (i = 0; i < di; i++) {
                        /* closed-form start of packed row i */
                        double complex *dst = out
                                + ((size_t)ic * nij + (size_t)i * ip1
                                   + (size_t)(i*(i-1)/2)) * NGv;
                        for (j = 0; j <= i; j++) {
                                const double complex *src = in + NGv * (size_t)(j*di+i);
                                for (n = 0; n < NGv; n++) {
                                        dst[j*NGv+n] = src[n];
                                }
                        }
                }
        }
}
/* Fill worker for full-matrix (s1) storage: translate the relative
 * shell indices (ish,jsh) within shls_slice into an offset in mat and
 * let intor write the pair block directly at that offset.  buf is
 * unused in this variant. */
void GTO_ft_fill_s1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *mat, int comp, int ish, int jsh,
double complex *buf,
int *shls_slice, int *ao_loc, double complex fac,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
/* convert slice-relative indices to absolute shell ids */
ish += ish0;
jsh += jsh0;
const int nrow = ao_loc[ish1] - ao_loc[ish0];
const int ncol = ao_loc[jsh1] - ao_loc[jsh0];
/* column-major offset of this shell-pair block in mat */
const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow;
int shls[2] = {ish, jsh};
int dims[2] = {nrow, ncol};
(*intor)(mat+off*nGv, shls, dims, eval_aopair, eval_gz,
fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* Fill worker for full-matrix storage exploiting i/j symmetry: only
 * pairs with ip >= jp are computed; when the row and column slices
 * coincide, the computed (i,j) block is additionally copied (not
 * conjugated -- the G-vector symmetry is presumably handled by the
 * caller; confirm) into the transposed (j,i) position. */
void GTO_ft_fill_s1hermi(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *mat, int comp, int ish, int jsh,
double complex *buf,
int *shls_slice, int *ao_loc, double complex fac,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
/* convert slice-relative indices to absolute shell ids */
ish += ish0;
jsh += jsh0;
const int ip = ao_loc[ish] - ao_loc[ish0];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
/* upper-triangle pairs are produced by the mirror copy below */
if (ip < jp) {
return;
}
const int nrow = ao_loc[ish1] - ao_loc[ish0];
const int ncol = ao_loc[jsh1] - ao_loc[jsh0];
const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow;
const size_t NGv = nGv;
int shls[2] = {ish, jsh};
int dims[2] = {nrow, ncol};
(*intor)(mat+off*NGv, shls, dims, eval_aopair, eval_gz,
fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env);
/* mirror the off-diagonal block when the slices cover a square */
if (ip != jp && ish0 == jsh0 && ish1 == jsh1) {
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
double complex *in = mat + off * NGv;
double complex *out = mat + (ao_loc[jsh] - ao_loc[jsh0] +
(ao_loc[ish] - ao_loc[ish0]) * nrow) * NGv;
int i, j, n, ic;
double complex *pout, *pin;
for (ic = 0; ic < comp; ic++) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pin = in + NGv * (j*nrow+i);
pout = out + NGv * (i*nrow+j);
for (n = 0; n < nGv; n++) {
pout[n] = pin[n];
}
}
}
out += nrow * ncol * NGv;
}
}
}
/* Fill worker for packed lower-triangular (s2) storage: compute the
 * pair block into buf, then scatter it into the triangle with the
 * igtj/ieqj copy helpers.  NOTE(review): ip is taken as the absolute
 * ao_loc[ish] while jp is slice-relative; the off0 correction assumes
 * the column slice starts at AO index 0 -- confirm against callers. */
void GTO_ft_fill_s2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *mat, int comp, int ish, int jsh,
double complex *buf,
int *shls_slice, int *ao_loc, double complex fac,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
/* convert slice-relative indices to absolute shell ids */
ish += ish0;
jsh += jsh0;
const int ip = ao_loc[ish];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
/* only the lower triangle is stored */
if (ip < jp) {
return;
}
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int i0 = ao_loc[ish0];
/* triangular offsets: off0 drops rows before the slice */
const size_t off0 = i0 * (i0 + 1) / 2;
const size_t off = ip * (ip + 1) / 2 - off0 + jp;
const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
const size_t NGv = nGv;
int shls[2] = {ish, jsh};
int dims[2] = {di, dj};
(*intor)(buf, shls, dims, eval_aopair, eval_gz,
fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env);
if (ip != jp) {
zcopy_s2_igtj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj);
} else {
zcopy_s2_ieqj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj);
}
}
/*
* Fourier transform AO pairs and add to mat (inplace)
*/
void GTO_ft_fill_drv(int (*intor)(), void (*eval_gz)(), void (*fill)(),
double complex *mat, int comp,
int *shls_slice, int *ao_loc, double phase,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
const double complex fac = cos(phase) + sin(phase)*_Complex_I;
int (*eval_aopair)() = NULL;
if (intor != >O_ft_ovlp_cart && intor != >O_ft_ovlp_sph) {
eval_aopair = >O_aopair_lazy_contract;
}
#pragma omp parallel default(none) \
shared(intor, eval_gz, eval_aopair, fill, mat, comp, shls_slice, \
ao_loc, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env)
{
int i, j, ij;
double complex *buf = malloc(sizeof(double complex)
* NCTRMAX*NCTRMAX*comp*(size_t)nGv);
#pragma omp for schedule(dynamic)
for (ij = 0; ij < nish*njsh; ij++) {
i = ij / njsh;
j = ij % njsh;
(*fill)(intor, eval_aopair, eval_gz, mat,
comp, i, j, buf, shls_slice, ao_loc, fac,
Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env);
}
free(buf);
}
}
/*
* Given npair of shls in shls_lst, FT their AO pair value and add to
* out (inplace)
*/
void GTO_ft_fill_shls_drv(int (*intor)(), void (*eval_gz)(),
double complex *out, int comp,
int npair, int *shls_lst, int *ao_loc, double phase,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int n, di, dj, ish, jsh;
int *ijloc = malloc(sizeof(int) * npair);
ijloc[0] = 0;
for (n = 1; n < npair; n++) {
ish = shls_lst[n*2-2];
jsh = shls_lst[n*2-1];
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
ijloc[n] = ijloc[n-1] + di*dj;
}
const double complex fac = cos(phase) + sin(phase)*_Complex_I;
const size_t NGv = nGv;
int (*eval_aopair)() = NULL;
if (intor != >O_ft_ovlp_cart && intor != >O_ft_ovlp_sph) {
eval_aopair = >O_aopair_lazy_contract;
}
#pragma omp parallel default(none) \
shared(intor, eval_gz, eval_aopair, out, comp, Gv, b, gxyz, gs, \
nGv, npair, shls_lst, ao_loc, \
atm, natm, bas, nbas, env, ijloc) \
private(n)
{
int ish, jsh;
int dims[2];
#pragma omp for schedule(dynamic)
for (n = 0; n < npair; n++) {
ish = shls_lst[n*2 ];
jsh = shls_lst[n*2+1];
dims[0] = ao_loc[ish+1] - ao_loc[ish];
dims[1] = ao_loc[jsh+1] - ao_loc[jsh];
(*intor)(out+ijloc[n]*comp*NGv, shls_lst+n*2, dims,
eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv,
atm, natm, bas, nbas, env);
}
}
free(ijloc);
}
|
DRB098-simd2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
/*
Two-dimension array computation with a vetorization directive
collapse(2) makes simd associate with 2 loops.
Loop iteration variables should be predetermined as lastprivate.
*/
#include <omp.h>
/* DataRaceBench kernel (DRB098): initialize three len x len matrices,
 * then form the elementwise product c = a .* b.  The nested "parallel
 * for" pragmas come from the benchmark's source-to-source transform of
 * an "omp simd collapse(2)" construct (see the header comment); do not
 * "fix" them -- the parallelization pattern is the test subject. */
int main()
{
int len = 100;
/* three 100x100 VLAs on the stack (~240 KB total) */
double a[len][len];
double b[len][len];
double c[len][len];
int i;
int j;
/* initialization pass: every element depends only on its row index */
#pragma omp parallel for private (i,j)
for (i = 0; i <= len - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= len - 1; j += 1) {
a[i][j] = ((double )i) / 2.0;
b[i][j] = ((double )i) / 3.0;
c[i][j] = ((double )i) / 7.0;
}
}
/* elementwise product; disjoint writes, no race expected */
#pragma omp parallel for private (i,j) firstprivate (len)
for (i = 0; i <= len - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= len - 1; j += 1) {
c[i][j] = a[i][j] * b[i][j];
}
}
printf("c[50][50]=%f\n",c[50][50]);
return 0;
}
|
test_openmp.c | #include <stdbool.h>
#include <stdio.h>
#include <omp.h>
#include "test.h"
#include "gtmp.h"
/*
 * Driver for the OpenMP barrier test harness: argv[1] = thread count,
 * argv[2] = number of barrier episodes; any extra argument restricts
 * the run to the fast tests.  Returns 0 on success, -1 on bad usage.
 * Fix vs. original: the argument check was `argc < 2` although argv[2]
 * is read below, so running with a single argument read past the end
 * of argv; it now requires both arguments, matching the usage string.
 * (atoi also requires <stdlib.h>, added to the includes.)
 */
int main(int argc, char **argv)
{
    /* Need both the thread count and the barrier count. */
    if (argc < 3) {
        printf("usage: %s <threads> <num_barriers>\n", argv[0]);
        return -1;
    }
    /* First argument is number of threads. */
    unsigned int num_threads = atoi(argv[1]);
    /* Second argument is number of barriers. */
    unsigned int num_barriers = atoi(argv[2]);
    /* Any other argument means only do fast tests. */
    bool fast = (argc > 3);
    /* Pin the team size: disable dynamic thread adjustment. */
    omp_set_dynamic(0);
    if (omp_get_dynamic()) {
        printf("Warning: dynamic adjustment of threads has been set\n");
    }
    omp_set_num_threads(num_threads);
    /* Initialize the barrier implementation under test. */
    gtmp_init(num_threads);
    /* Each thread runs the harness against the shared barrier. */
    #pragma omp parallel
    {
        int rank = omp_get_thread_num();
        run_test_harness(gtmp_barrier, rank, num_barriers, fast);
    }
    /* Tear down the barrier implementation. */
    gtmp_finalize();
    return 0;
}
|
eval_cf.c | /*******************************************************************************
* 2pt/eval_cf.c: this file is part of the FCFC program.
* FCFC: Fast Correlation Function Calculator.
* Github repository:
https://github.com/cheng-zhao/FCFC
* Copyright (c) 2020 -- 2021 Cheng Zhao <zhaocheng03@gmail.com> [MIT license]
*******************************************************************************/
#include "eval_cf.h"
#include "count_func.h"
#include "read_file.h"
#include "read_res.h"
#include "save_res.h"
#include "legpoly.h"
#include "build_tree.h"
#include <stdlib.h>
#ifdef OMP
#include <omp.h>
#endif
/*============================================================================*\
Macros for error handling
\*============================================================================*/
#define CLEAN_TREE \
for (int ii = 0; ii < cf->ncat; ii++) \
tree_destroy(tree[ii], FCFC_TREE_TYPE_KDTREE); \
free(tree);
/*============================================================================*\
Functions for steps of correlation function evaluations
\*============================================================================*/
/******************************************************************************
Function `eval_pairs`:
Evaluate pair counts for the input catalogs.
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
static int eval_pairs(const CONF *conf, CF *cf) {
  /* Allocate memory for trees. */
  void **tree;
  if (!(tree = malloc(sizeof(void *) * cf->ncat))) {
    P_ERR("failed to allocate memory for trees\n");
    return FCFC_ERR_MEMORY;
  }
  /* Trees are built lazily below; NULL marks "not constructed yet". */
  for (int i = 0; i < cf->ncat; i++) tree[i] = NULL;
  /* ny: number of bins in the second pair-count dimension (pi or mu). */
  int ny;
  if (cf->bintype == FCFC_BIN_SPI) ny = cf->np;
  else ny = cf->nmu; /* cf->bintype == FCFC_BIN_ISO or FCFC_BIN_SMU */
  /* Process each requested pair count in turn. */
  for (int i = 0; i < cf->npc; i++) {
    /* Read pair counts from file if applicable. */
    if (!cf->comp_pc[i]) {
      printf("Reading %s pairs ...", conf->pc[i]);
      if (conf->verbose) printf("\n Filename: %s\n", conf->pcout[i]);
      fflush(stdout);
      int e = read_pair_count(conf->pcout[i], cf->ns, ny, cf->ncnt[i]);
      if (e) {
        CLEAN_TREE;
        return e;
      }
      printf(FMT_DONE);
      continue;
    }
    /* Indices of the two catalogues contributing to this pair count. */
    int cat[2];
    cat[0] = cf->pc_idx[0][i];
    cat[1] = cf->pc_idx[1][i];
    /* Weighted counting is used if either catalogue carries weights. */
    bool usewt = cf->wt[cat[0]] || cf->wt[cat[1]];
    /* Read catalogue and construct the tree if necessary. */
    for (int j = 0; j < 2; j++) {
      int idx = cat[j];
      if (!tree[idx])
        tree[idx] = tree_create(conf, cf, idx, FCFC_TREE_TYPE_KDTREE);
      if (!tree[idx]) {
        CLEAN_TREE;
        return FCFC_ERR_TREE;
      }
    }
    /* Count pairs. */
    printf("Counting %c%c pairs ...", cf->label[cat[0]], cf->label[cat[1]]);
    if (conf->verbose) printf("\n");
    fflush(stdout);
    if (cat[0] == cat[1]) { /* auto pairs */
      count_pairs(tree[cat[0]], tree[cat[0]], cf, cf->cnt[i], true, usewt);
      /* Double auto pairs. */
      /* Normalisation uses ordered pairs: N * (N - 1) (or the weighted
         analogue), matching the doubled counts. */
      if (usewt) {
        for (size_t k = 0; k < cf->ntot; k++) cf->cnt[i][k].d *= 2;
        cf->norm[i] = cf->wdata[cat[0]] * (cf->wdata[cat[0]] - 1);
      }
      else {
        for (size_t k = 0; k < cf->ntot; k++) cf->cnt[i][k].i *= 2;
        cf->norm[i] = (double) cf->ndata[cat[0]] * (cf->ndata[cat[0]] - 1);
      }
    }
    else { /* cross counts */
      count_pairs(tree[cat[0]], tree[cat[1]], cf, cf->cnt[i], false, usewt);
      if (usewt) cf->norm[i] = cf->wdata[cat[0]] * cf->wdata[cat[1]];
      else cf->norm[i] = (double) cf->ndata[cat[0]] * cf->ndata[cat[1]];
    }
    /* Normalise pair counts. */
    /* Weighted counts accumulate in the .d (double) member, unweighted
       counts in the .i (integer) member of the count union. */
    if (usewt) {
      for (size_t k = 0; k < cf->ntot; k++)
        cf->ncnt[i][k] = cf->cnt[i][k].d / cf->norm[i];
    }
    else {
      for (size_t k = 0; k < cf->ntot; k++)
        cf->ncnt[i][k] = cf->cnt[i][k].i / cf->norm[i];
    }
    /* Save pair counts. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_PAIR_COUNT);
    if (e) {
      CLEAN_TREE;
      return e;
    }
    /* Release memory if necessary. */
    /* A catalogue (and its tree) is freed as soon as no later pair count
       references it, to bound peak memory usage. */
    for (int j = 0; j < 2; j++) {
      bool free_data = true;
      for (int k = i + 1; k < cf->npc; k++) {
        if (cat[j] == cf->pc_idx[0][k] || cat[j] == cf->pc_idx[1][k]) {
          free_data = false;
          break;
        }
      }
      if (free_data) {
        free(cf->data[cat[j]]);
        cf->data[cat[j]] = NULL;
        tree_destroy(tree[cat[j]], FCFC_TREE_TYPE_KDTREE);
        tree[cat[j]] = NULL;
      }
    }
    printf(FMT_DONE);
  }
  CLEAN_TREE;
  return 0;
}
/******************************************************************************
Function `eval_cf_exp`:
Evaluate correlation functions given the expressions for estimators.
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
static int eval_cf_exp(const CONF *conf, CF *cf) {
  printf("Evaluate correlation function estimators ...");
  if (conf->verbose) printf("\n");
  fflush(stdout);
  /* Prepare the array of normalised pair counts, for libast evaluations. */
#ifdef OMP
  /* One npc-sized scratch slot per thread, so threads can fill their own
     slice of `pc` concurrently without synchronisation. */
  double *pc = calloc((size_t) cf->nthread * cf->npc, sizeof(double));
#else
  double *pc = calloc(cf->npc, sizeof(double));
#endif
  if (!pc) {
    P_ERR("failed to allocate memory for 2PCF evaluation\n");
    return FCFC_ERR_MEMORY;
  }
  for (int i = 0; i < cf->ncf; i++) {
#ifdef OMP
  /* firstprivate(pc): every thread gets a private copy of the base pointer,
     then offsets it by tid * npc to claim its own slot; the heap block
     itself remains shared. */
#pragma omp parallel num_threads(cf->nthread) firstprivate(pc)
    {
      const int tid = omp_get_thread_num();
      pc += (size_t) tid * cf->npc;
#pragma omp for
#endif
      for (size_t j = 0; j < cf->ntot; j++) {
        /* Set pair counts to be passed to libast. */
        for (int k = 0; k < cf->npc; k++) pc[k] = cf->ncnt[k][j];
        /* Evaluate the 2PCF. */
        if (ast_eval_num(cf->ast_cf[i], cf->cf[i] + j, pc, cf->npc)) {
          ast_perror(cf->ast_cf[i], stderr,
              FMT_ERR " failed to evaluate 2PCF:");
#ifdef OMP
          /* NOTE(review): a plain `return` from inside an OpenMP parallel
             region is not allowed, so the whole process exits here; `pc`
             is intentionally not freed on this path. */
          exit(FCFC_ERR_AST);
#else
          free(pc); return FCFC_ERR_AST;
#endif
        }
      }
#ifdef OMP
    }
#endif
    /* Save the correlation function. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_RAW);
    if (e) {
      free(pc); return e;
    }
  }
  free(pc);
  printf(FMT_DONE);
  return 0;
}
/******************************************************************************
Function `eval_cf_mp`:
Evaluate correlation function multipoles given xi(s,mu).
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
static int eval_cf_mp(const CONF *conf, CF *cf) {
  printf("Compute correlation function multipoles ...");
  if (conf->verbose) printf("\n");
  fflush(stdout);
  for (int i = 0; i < cf->ncf; i++) {
    /* Project xi(s,mu) onto each requested Legendre multipole:
       xi_ell(s) = (2*ell + 1) / nmu * sum_j xi(s, mu_j) * L_ell(mu_j). */
    for (int l = 0; l < cf->nl; l++) {
      const int ell = cf->poles[l];
      /* Multipole normalisation combined with the mu bin width. */
      const double fac = (2 * ell + 1) / (double) cf->nmu;
      for (int k = 0; k < cf->ns; k++) {
        /* Accumulate over mu bins for this separation bin. */
        for (int j = 0; j < cf->nmu; j++) {
          const double mu = (j + 0.5) / (double) cf->nmu; /* bin centre */
          cf->mp[i][k + l * cf->ns] +=
              cf->cf[i][k + j * cf->ns] * fac * legpoly(ell, mu);
        }
      }
    }
    /* Save the correlation function multipoles. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_INTEG);
    if (e) return e;
  }
  printf(FMT_DONE);
  return 0;
}
/******************************************************************************
Function `eval_cf_wp`:
Evaluate projected correlation functions given xi(s_perp,pi).
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
static int eval_cf_wp(const CONF *conf, CF *cf) {
  printf("Compute projected correlation functions ...");
  if (conf->verbose) printf("\n");
  fflush(stdout);
  for (int i = 0; i < cf->ncf; i++) {
    /* wp(s_perp) = 2 * sum_j xi(s_perp, pi_j) * dpi_j, i.e. the integral
       of xi over the line-of-sight (pi) bins, doubled for the pi < 0 half. */
    for (int k = 0; k < cf->ns; k++) {
      for (int j = 0; j < cf->np; j++) {
        const double dpi = cf->pbin[j + 1] - cf->pbin[j]; /* pi bin width */
        cf->wp[i][k] += 2 * cf->cf[i][k + j * cf->ns] * dpi;
      }
    }
    /* Save the projected correlation functions. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_INTEG);
    if (e) return e;
  }
  printf(FMT_DONE);
  return 0;
}
/*============================================================================*\
Interface for correlation function evaluations
\*============================================================================*/
/******************************************************************************
Function `eval_cf`:
Evaluate correlation functions.
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
int eval_cf(const CONF *conf, CF *cf) {
  /* Run the evaluation pipeline: pair counts first, then each optional
     product (estimator expressions, multipoles, projected 2PCF).
     The first failing stage aborts the pipeline and its code is returned. */
  int e;
  if ((e = eval_pairs(conf, cf))) return e;
  if (cf->ncf && (e = eval_cf_exp(conf, cf))) return e;
  if (cf->nl && (e = eval_cf_mp(conf, cf))) return e;
  if (cf->comp_wp && (e = eval_cf_wp(conf, cf))) return e;
  return 0;
}
|
idasFoodWeb_bnd_omp.c | /*
* -----------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU
* Based on idaFoodWeb_bnd.c and parallelized with OpenMP
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example program for IDAS: Food web problem.
*
* This example program (OpenMP version) uses the SUNBAND linear
* solver, and IDACalcIC for initial condition calculation.
*
* The mathematical problem solved in this example is a DAE system
* that arises from a system of partial differential equations after
* spatial discretization. The PDE system is a food web population
* model, with predator-prey interaction and diffusion on the unit
* square in two dimensions. The dependent variable vector is:
*
* 1 2 ns
* c = (c , c , ..., c ) , ns = 2 * np
*
* and the PDE's are as follows:
*
* i i i
* dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np)
* xx yy i
*
* i i
* 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns)
* xx yy i
*
* where the reaction terms R are:
*
* i ns j
* R (x,y,c) = c * (b(i) + sum a(i,j)*c )
* i j=1
*
* The number of species is ns = 2 * np, with the first np being
* prey and the last np being predators. The coefficients a(i,j),
* b(i), d(i) are:
*
* a(i,i) = -AA (all i)
* a(i,j) = -GG (i <= np , j > np)
* a(i,j) = EE (i > np, j <= np)
* all other a(i,j) = 0
* b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np)
* b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np)
* d(i) = DPREY (i <= np)
* d(i) = DPRED (i > np)
*
* The various scalar parameters required are set using '#define'
* statements or directly in routine InitUserData. In this program,
* np = 1, ns = 2. The boundary conditions are homogeneous Neumann:
* normal derivative = 0.
*
* A polynomial in x and y is used to set the initial values of the
* first np variables (the prey variables) at each x,y location,
* while initial values for the remaining (predator) variables are
* set to a flat value, which is corrected by IDACalcIC.
*
* The PDEs are discretized by central differencing on a MX by MY
* mesh.
*
* The DAE system is solved by IDAS using the SUNBAND linear solver.
* Output is printed at t = 0, .001, .01, .1, .4, .7, 1.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value for the number of threads from
* the OMP_NUM_THREADS environment value:
* % ./idasFoodWeb_bnd_omp
* To specify the number of threads at the command line, use
* % ./idasFoodWeb_bnd_omp num_threads
* where num_threads is the desired number of threads.
*
* -----------------------------------------------------------------
* References:
* [1] Peter N. Brown and Alan C. Hindmarsh,
* Reduced Storage Matrix Methods in Stiff ODE systems, Journal
* of Applied Mathematics and Computation, Vol. 31 (May 1989),
* pp. 40-91.
*
* [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Using Krylov Methods in the Solution of Large-Scale
* Differential-Algebraic Systems, SIAM J. Sci. Comput., 15
* (1994), pp. 1467-1488.
*
* [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Consistent Initial Condition Calculation for Differential-
* Algebraic Systems, SIAM J. Sci. Comput., 19 (1998),
* pp. 1495-1512.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <idas/idas.h>
#include <sunmatrix/sunmatrix_band.h>
#include <sunlinsol/sunlinsol_band.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_direct.h>
#include <sundials/sundials_types.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Problem Constants. */
#define NPREY 1 /* No. of prey (= no. of predators). */
#define NUM_SPECIES 2*NPREY
#define PI RCONST(3.1415926535898)
#define FOURPI (RCONST(4.0)*PI)
#define MX 20 /* MX = number of x mesh points */
#define MY 20 /* MY = number of y mesh points */
#define NSMX (NUM_SPECIES * MX)
#define NEQ (NUM_SPECIES*MX*MY)
#define AA RCONST(1.0) /* Coefficient in above eqns. for a */
#define EE RCONST(10000.) /* Coefficient in above eqns. for a */
#define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */
#define BB RCONST(1.0) /* Coefficient in above eqns. for b */
#define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */
#define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */
#define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */
#define BETA RCONST(1000.) /* Coefficient beta in above eqns. */
#define AX RCONST(1.0) /* Total range of x variable */
#define AY RCONST(1.0) /* Total range of y variable */
#define RTOL RCONST(1.e-5) /* Relative tolerance */
#define ATOL RCONST(1.e-5) /* Absolute tolerance */
#define NOUT 6 /* Number of output times */
#define TMULT RCONST(10.0) /* Multiplier for tout values */
#define TADD RCONST(0.3) /* Increment for tout values */
#define ZERO RCONST(0.)
#define ONE RCONST(1.0)
/*
* User-defined vector and accessor macro: IJ_Vptr.
* IJ_Vptr is defined in order to express the underlying 3-D structure of
* the dependent variable vector from its underlying 1-D storage (an N_Vector).
* IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
* species index is = 0, x-index ix = i, and y-index jy = j.
*/
#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))
/* Type: UserData. Contains problem constants, etc. */
typedef struct {
sunindextype Neq, ns, np, mx, my;
realtype dx, dy, **acoef;
realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];
N_Vector rates;
int nthreads;
} *UserData;
/* Prototypes for functions called by the IDA Solver. */
static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval,
void *user_data);
/* Prototypes for private Helper Functions. */
static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata);
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);
/*
*--------------------------------------------------------------------
* MAIN PROGRAM
*--------------------------------------------------------------------
*/
int main(int argc, char *argv[])
{
  void *ida_mem;
  SUNMatrix A;
  SUNLinearSolver LS;
  UserData webdata;
  N_Vector cc, cp, id;
  int iout, retval;
  sunindextype mu, ml;
  realtype rtol, atol, t0, tout, tret;
  int num_threads;
  /* Initialise all handles so cleanup is safe on any early exit path. */
  ida_mem = NULL;
  A = NULL;
  LS = NULL;
  webdata = NULL;
  cc = cp = id = NULL;
  /* Set the number of threads to use */
  num_threads = 1; /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS enviroment variable */
#endif
  if (argc > 1) /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);
  /* Allocate and initialize user data block webdata. */
  /* NOTE(review): the malloc / N_VNew_OpenMP / newDenseMat results here are
     not checked before use, unlike the vectors below -- consider adding
     check_retval() calls. */
  webdata = (UserData) malloc(sizeof *webdata);
  webdata->rates = N_VNew_OpenMP(NEQ, num_threads);
  webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES);
  webdata->nthreads = num_threads;
  InitUserData(webdata);
  /* Allocate N-vectors and initialize cc, cp, and id. */
  cc = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);
  cp = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);
  id = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);
  SetInitialProfiles(cc, cp, id, webdata);
  /* Set remaining inputs to IDAMalloc. */
  t0 = ZERO;
  rtol = RTOL;
  atol = ATOL;
  /* Call IDACreate and IDAMalloc to initialize IDA. */
  ida_mem = IDACreate();
  if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1);
  retval = IDASetUserData(ida_mem, webdata);
  if(check_retval(&retval, "IDASetUserData", 1)) return(1);
  /* id marks each component as differential (1) or algebraic (0) for
     IDACalcIC. */
  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1)) return(1);
  retval = IDAInit(ida_mem, resweb, t0, cc, cp);
  if(check_retval(&retval, "IDAInit", 1)) return(1);
  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1)) return(1);
  /* Setup band matrix and linear solver, and attach to IDA. */
  mu = ml = NSMX;
  A = SUNBandMatrix(NEQ, mu, ml);
  if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1);
  LS = SUNLinSol_Band(cc, A);
  if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1);
  retval = IDASetLinearSolver(ida_mem, LS, A);
  if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);
  /* Call IDACalcIC (with default options) to correct the initial values. */
  tout = RCONST(0.001);
  retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
  if(check_retval(&retval, "IDACalcIC", 1)) return(1);
  /* Print heading, basic parameters, and initial values. */
  PrintHeader(mu, ml, rtol, atol);
  PrintOutput(ida_mem, cc, ZERO);
  /* Loop over iout, call IDASolve (normal mode), print selected output. */
  for (iout = 1; iout <= NOUT; iout++) {
    retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1)) return(retval);
    PrintOutput(ida_mem, cc, tret);
    /* Output times: multiply by 10 for the first outputs, then step by 0.3. */
    if (iout < 3) tout *= TMULT; else tout += TADD;
  }
  /* Print final statistics and free memory. */
  PrintFinalStats(ida_mem);
  printf("num_threads = %i\n\n", num_threads);
  /* Free memory */
  IDAFree(&ida_mem);
  SUNLinSolFree(LS);
  SUNMatDestroy(A);
  N_VDestroy_OpenMP(cc);
  N_VDestroy_OpenMP(cp);
  N_VDestroy_OpenMP(id);
  destroyMat(webdata->acoef);
  N_VDestroy_OpenMP(webdata->rates);
  free(webdata);
  return(0);
}
/* Define lines for readability in later routines */
#define acoef (webdata->acoef)
#define bcoef (webdata->bcoef)
#define cox (webdata->cox)
#define coy (webdata->coy)
/*
*--------------------------------------------------------------------
* FUNCTIONS CALLED BY IDA
*--------------------------------------------------------------------
*/
/*
* resweb: System residual function for predator-prey system.
* This routine calls Fweb to get all the right-hand sides of the
* equations, then loads the residual vector accordingly,
* using cp in the case of prey species.
*/
static int resweb(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector res, void *user_data)
{
  sunindextype jx, jy, is, yloc, loc, np;
  realtype *resv, *cpv;
  UserData webdata;
  jx = jy = is = 0;
  webdata = (UserData)user_data;
  cpv = NV_DATA_OMP(cp);
  resv = NV_DATA_OMP(res);
  np = webdata->np;
  /* Call Fweb to set res to vector of right-hand sides. */
  Fweb(tt, cc, res, webdata);
  /* Loop over all grid points, setting residual values appropriately
     for differential or algebraic components. */
  /* Prey components (is < np) are differential: F = c' - RHS.
     Predator components are algebraic: F = -RHS (no time derivative). */
#pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads)
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np)
          resv[loc+is] = cpv[loc+is] - resv[loc+is];
        else
          resv[loc+is] = -resv[loc+is];
      }
    }
  }
  return(0);
}
/*
*--------------------------------------------------------------------
* PRIVATE FUNCTIONS
*--------------------------------------------------------------------
*/
/*
* InitUserData: Load problem constants in webdata (of type UserData).
*/
static void InitUserData(UserData webdata)
{
  sunindextype i, j, np;
  realtype *a1,*a2, *a3, *a4, dx2, dy2;
  /* Mesh and system dimensions. */
  webdata->mx = MX;
  webdata->my = MY;
  webdata->ns = NUM_SPECIES;
  webdata->np = NPREY;
  webdata->dx = AX/(MX-1);
  webdata->dy = AY/(MY-1);
  webdata->Neq= NEQ;
  /* Set up the coefficients a and b, and others found in the equations. */
  np = webdata->np;
  dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy);
  for (i = 0; i < np; i++) {
    /* Row pointers into the four np-by-np quadrants of acoef:
       a1 = prey/predator block, a2 = predator/prey block,
       a3 = prey/prey block, a4 = predator/predator block. */
    a1 = &(acoef[i][np]);
    a2 = &(acoef[i+np][0]);
    a3 = &(acoef[i][0]);
    a4 = &(acoef[i+np][np]);
    /* Fill in the portion of acoef in the four quadrants, row by row. */
    for (j = 0; j < np; j++) {
      *a1++ = -GG;
      *a2++ = EE;
      *a3++ = ZERO;
      *a4++ = ZERO;
    }
    /* Reset the diagonal elements of acoef to -AA. */
    acoef[i][i] = -AA; acoef[i+np][i+np] = -AA;
    /* Set coefficients for b and diffusion terms. */
    /* Diffusion coefficients are scaled by the squared mesh widths so the
       difference quotients in Fweb need no further division. */
    bcoef[i] = BB; bcoef[i+np] = -BB;
    cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2;
    coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2;
  }
}
/*
* SetInitialProfiles: Set initial conditions in cc, cp, and id.
* A polynomial profile is used for the prey cc values, and a constant
* (1.0e5) is loaded as the initial guess for the predator cc values.
* The id values are set to 1 for the prey and 0 for the predators.
* The prey cp values are set according to the given system, and
* the predator cp values are set to zero.
*/
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata)
{
  sunindextype loc, yloc, is, jx, jy, np;
  realtype xx, yy, xyfactor;
  realtype *ccv, *cpv, *idv;
  ccv = NV_DATA_OMP(cc);
  cpv = NV_DATA_OMP(cp);
  idv = NV_DATA_OMP(id);
  np = webdata->np;
  /* Loop over grid, load cc values and id values. */
  for (jy = 0; jy < MY; jy++) {
    yy = jy * webdata->dy;
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      xx = jx * webdata->dx;
      /* Polynomial bump that vanishes on the domain boundary, squared. */
      xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
      xyfactor *= xyfactor;
      loc = yloc + NUM_SPECIES*jx;
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np) {
          /* Prey: polynomial profile; id = 1 marks a differential variable. */
          ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
          idv[loc+is] = ONE;
        }
        else {
          /* Predator: flat guess (corrected later by IDACalcIC);
             id = 0 marks an algebraic variable. */
          ccv[loc+is] = RCONST(1.0e5);
          idv[loc+is] = ZERO;
        }
      }
    }
  }
  /* Set c' for the prey by calling the function Fweb. */
  Fweb(ZERO, cc, cp, webdata);
  /* Set c' for predators to 0. */
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = np; is < NUM_SPECIES; is++) {
        cpv[loc+is] = ZERO;
      }
    }
  }
}
/*
* Print first lines of output (problem description)
*/
/* Print the problem description and run parameters once at startup. */
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol)
{
  printf("\nidasFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDAS \n\n");
  printf("Number of species ns: %d", NUM_SPECIES);
  printf(" Mesh dimensions: %d x %d", MX, MY);
  printf(" System size: %d\n", NEQ);
  /* Format specifier depends on how realtype is configured; the single-
     precision branch uses %g because floats are promoted to double in
     variadic calls. */
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#else
  printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#endif
  printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n",
         (long int) mu, (long int) ml);
  printf("CalcIC called to correct initial predator concentrations.\n\n");
  printf("-----------------------------------------------------------\n");
  printf(" t bottom-left top-right");
  printf(" | nst k h\n");
  printf("-----------------------------------------------------------\n\n");
}
/*
* PrintOutput: Print output values at output time t = tt.
* Selected run statistics are printed. Then values of the concentrations
* are printed for the bottom left and top right grid points only.
*/
static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
  int i, kused, retval;
  long int nst;
  realtype *c_bl, *c_tr, hused;
  /* Query solver statistics: last order used, step count, last step size. */
  retval = IDAGetLastOrder(ida_mem, &kused);
  check_retval(&retval, "IDAGetLastOrder", 1);
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetLastStep(ida_mem, &hused);
  check_retval(&retval, "IDAGetLastStep", 1);
  /* Concentrations at the bottom-left and top-right grid points only. */
  c_bl = IJ_Vptr(c,0,0);
  c_tr = IJ_Vptr(c,MX-1,MY-1);
  /* The three branches differ only in the length modifier required for the
     configured realtype. */
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#else
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#endif
  printf("\n");
}
/*
* PrintFinalStats: Print final run data contained in iopt.
*/
static void PrintFinalStats(void *ida_mem)
{
  long int nst, nre, nreLS, nni, nje, netf, ncfn;
  int retval;
  /* Gather cumulative counters from the integrator and linear solver. */
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetNumNonlinSolvIters(ida_mem, &nni);
  check_retval(&retval, "IDAGetNumNonlinSolvIters", 1);
  retval = IDAGetNumResEvals(ida_mem, &nre);
  check_retval(&retval, "IDAGetNumResEvals", 1);
  retval = IDAGetNumErrTestFails(ida_mem, &netf);
  check_retval(&retval, "IDAGetNumErrTestFails", 1);
  retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn);
  check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1);
  retval = IDAGetNumJacEvals(ida_mem, &nje);
  check_retval(&retval, "IDAGetNumJacEvals", 1);
  retval = IDAGetNumLinResEvals(ida_mem, &nreLS);
  check_retval(&retval, "IDAGetNumLinResEvals", 1);
  printf("-----------------------------------------------------------\n");
  printf("Final run statistics: \n\n");
  printf("Number of steps = %ld\n", nst);
  /* Residual evaluations include those made by the linear solver. */
  printf("Number of residual evaluations = %ld\n", nre+nreLS);
  printf("Number of Jacobian evaluations = %ld\n", nje);
  printf("Number of nonlinear iterations = %ld\n", nni);
  printf("Number of error test failures = %ld\n", netf);
  printf("Number of nonlinear conv. failures = %ld\n", ncfn);
}
/*
* Fweb: Rate function for the food-web problem.
* This routine computes the right-hand sides of the system equations,
* consisting of the diffusion term and interaction term.
* The interaction term is computed by the function WebRates.
*/
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata)
{
  sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
  realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;
  /* Loop over grid points, evaluate interaction vector (length ns),
     form diffusion difference terms, and load crate. */
  jx = jy = is = 0;
  for (jy = 0; jy < MY; jy++) {
    yy = (webdata->dy) * jy ;
    /* Neighbour offsets; the sign flips at a boundary so the difference
       mirrors the interior neighbour (homogeneous Neumann BC, see file
       header). */
    idyu = (jy!=MY-1) ? NSMX : -NSMX;
    idyl = (jy!= 0 ) ? NSMX : -NSMX;
    for (jx = 0; jx < MX; jx++) {
      xx = (webdata->dx) * jx;
      idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
      idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
      cxy = IJ_Vptr(cc,jx,jy);
      ratesxy = IJ_Vptr(webdata->rates,jx,jy);
      cratexy = IJ_Vptr(crate,jx,jy);
      /* Get interaction vector at this grid point. */
      WebRates(xx, yy, cxy, ratesxy, webdata);
      /* Loop over species, do differencing, load crate segment. */
      /* NOTE(review): this parallel region is entered once per grid point
         to split a NUM_SPECIES-iteration loop (NUM_SPECIES == 2 here), so
         the fork/join overhead likely dominates; parallelizing the jy loop
         instead may be faster -- TODO confirm with profiling. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
      for (is = 0; is < NUM_SPECIES; is++) {
        /* Differencing in y. */
        dcyli = *(cxy+is) - *(cxy - idyl + is) ;
        dcyui = *(cxy + idyu + is) - *(cxy+is);
        /* Differencing in x. */
        dcxli = *(cxy+is) - *(cxy - idxl + is);
        dcxui = *(cxy + idxu +is) - *(cxy+is);
        /* Compute the crate values at (xx,yy). */
        cratexy[is] = coy[is] * (dcyui - dcyli) +
          cox[is] * (dcxui - dcxli) + ratesxy[is];
      } /* End is loop */
    } /* End of jx loop */
  } /* End of jy loop */
}
/*
* WebRates: Evaluate reaction rates at a given spatial point.
* At a given (x,y), evaluate the array of ns reaction terms R.
*/
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
                     UserData webdata)
{
  int i;
  realtype growth;

  /* Spatially varying factor applied to the b coefficients. */
  growth = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);

  /* Linear interaction term: ratesxy = acoef * cxy, row by row. */
  for (i = 0; i < NUM_SPECIES; i++)
    ratesxy[i] = dotprod(NUM_SPECIES, cxy, acoef[i]);

  /* Full rate: R_i = c_i * (b_i * growth + (A c)_i). */
  for (i = 0; i < NUM_SPECIES; i++)
    ratesxy[i] = cxy[i]*( bcoef[i]*growth + ratesxy[i] );
}
/*
* dotprod: dot product routine for realtype arrays, for use by WebRates.
*/
/*
 * dotprod: dot product of two realtype arrays of the given length,
 * used by WebRates for the interaction matrix-vector product.
 */
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype i;
  realtype acc = ZERO;
  for (i = 0; i < size; i++)
    acc += x1[i] * x2[i];
  return(acc);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
/*
 * check_retval: uniform error reporting for SUNDIALS calls.
 *   opt == 0 -- SUNDIALS allocator: fail if returnvalue is NULL.
 *   opt == 1 -- SUNDIALS status:    fail if *(int *)returnvalue < 0.
 *   opt == 2 -- plain allocator:    fail if returnvalue is NULL.
 * Returns 1 (and prints to stderr) on failure, 0 otherwise.
 */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
  switch (opt) {
  case 0:
    /* SUNDIALS function returned NULL pointer - no memory allocated. */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  case 1: {
    /* SUNDIALS function returned an integer status; negative is failure. */
    int *retval = (int *) returnvalue;
    if (*retval < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *retval);
      return(1);
    }
    break;
  }
  case 2:
    /* Ordinary allocator returned NULL pointer - no memory allocated. */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  }
  return(0);
}
|
simd-11.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
int s = 0, i, j, u;
/* Compiler correctness test for a collapsed `omp for simd` worksharing
   loop.  NOTE: the construct is orphaned (no enclosing parallel region),
   so it binds to the implicit initial team and runs on one thread. */
void
foo ()
{
  /* s receives 16*16 = 256 reduction increments; u is lastprivate, so it
     ends with the value from the final iteration (i = 15, j = 15 -> 30);
     the shared loop variables i and j end at their upper bounds (16). */
#pragma omp for simd schedule(static, 32) reduction(+:s) lastprivate(u) collapse(2)
  for (i = 0; i < 16; i++)
    for (j = 0; j < 16; j++)
      {
	s++;
	u = i + j;
      }
  /* Any deviation from the expected final values is a test failure. */
  if (i != 16 || j != 16 || s != 256 || u != 30)
    __builtin_abort ();
}
/* Entry point: run the single test case; returning 0 signals success. */
int
main ()
{
  foo ();
  return 0;
}
|
unionreduce.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SINGLENODE_UNIONREDUCE_H_
#define SRC_SINGLENODE_UNIONREDUCE_H_
#include <algorithm>
// Element-wise union of two dense vectors with presence bitvectors.
// For each of the nnz slots: if only one input is present, its value is
// copied to v3; if both are present, op_fp combines them into v3 (vsp is
// passed through to op_fp as opaque state).  bv3 becomes the OR of the
// input bitvectors.  Slots present in neither input leave v3 untouched.
template <typename Ta, typename Tb, typename Tc>
void union_dense(Ta* v1, int * bv1, int nnz, int num_ints, Tb * v2, int * bv2, Tc * v3, int * bv3,
		void (*op_fp)(const Ta&, const Tb&, Tc*, void*), void* vsp)
{
  // Merge the value arrays; each slot is independent, so the loop is
  // trivially parallel.
  #pragma omp parallel for
  for (int idx = 0; idx < nnz; idx++) {
    const bool in1 = get_bitvector(idx, bv1);
    const bool in2 = get_bitvector(idx, bv2);
    if (in1) {
      if (in2)
        op_fp(v1[idx], v2[idx], &(v3[idx]), vsp);
      else
        v3[idx] = v1[idx];
    } else if (in2) {
      v3[idx] = v2[idx];
    }
  }
  // The output presence bitvector is the word-wise OR of the inputs.
  #pragma omp parallel for
  for (int w = 0; w < num_ints; w++) {
    bv3[w] = bv1[w] | bv2[w];
  }
}
/*
 * Merge a compressed sparse vector (values v1[0..nnz), positions
 * indices[0..nnz), indices assumed sorted ascending) into a dense vector v2
 * with occupancy bitvector bv2, in parallel.
 * For each entry: if v2 already holds that position, reduce with
 * op_fp(accumulator, incoming, &out, vsp); otherwise set the bit and copy.
 * 'capacity' and 'num_ints' are not used here — presumably kept for
 * interface symmetry with the dense variant (NOTE(review): confirm).
 */
template <typename Ta, typename Tb>
void union_compressed(Ta* v1, int* indices, int nnz, int capacity, int num_ints, Tb * v2, int * bv2,
                      void (*op_fp)(const Ta&, const Tb&, Tb*, void*), void* vsp)
{
  //int * indices = reinterpret_cast<int*>(v1 + nnz);
  // Oversubscribe partitions (16x threads) so OpenMP can load-balance.
  int npartitions = omp_get_max_threads() * 16;
  #pragma omp parallel for
  for(int p = 0 ; p < npartitions ; p++)
  {
    int nz_per = (nnz + npartitions - 1) / npartitions;
    int start_nnz = p * nz_per;
    int end_nnz = (p+1) * nz_per;
    if(end_nnz > nnz) end_nnz = nnz;
    // Adjust partition boundaries so that all entries whose positions share
    // the same 32-bit bitvector word stay in one partition; this prevents
    // two threads from read-modify-writing the same word of bv2.
    if(start_nnz > 0)
    {
      while((start_nnz < nnz) && (indices[start_nnz]/32 == indices[start_nnz-1]/32)) start_nnz++;
    }
    while((end_nnz < nnz) && (indices[end_nnz]/32 == indices[end_nnz-1]/32)) end_nnz++;
    for(int i = start_nnz ; i < end_nnz ; i++)
    {
      int idx = indices[i];
      if(get_bitvector(idx, bv2))
      {
        // Position already occupied: reduce, accumulator (v2) first.
        //Tb tmp = v2[idx];
        //op_fp(v1[i], tmp, &(v2[idx]), vsp);
        //op_fp(v1[i], v2[idx], &(v2[idx]), vsp);
        op_fp(v2[idx], v1[i], &(v2[idx]), vsp);
      }
      else
      {
        // First value at this position: mark occupancy and copy.
        set_bitvector(idx, bv2);
        v2[idx] = v1[i];
      }
    }
  }
}
#endif // SRC_SINGLENODE_UNIONREDUCE_H_
|
sum_itlmic_kernel.c | //
// Created by Yonghong Yan on 1/7/16.
//
#ifdef __cplusplus
extern "C" {
#endif
#include "sum.h"
#include <offload.h>
#include <homp.h>
#ifdef USE_INTEL_MKL
#include <mkl.h>
#endif
/*
 * Compute the dot product of x and y over (length_n - start_n) elements on an
 * Intel MIC device and store it in *result.
 *
 * off:       offloading descriptor; provides the target device id and core count.
 * start_n, length_n: the element count processed is length_n - start_n.
 * x, y:      input vectors (already resident on the device unless
 *            ITLMIC_COMBINED_OFFLOADING transfers them here).
 * result:    out-parameter receiving the scalar sum.
 */
void sum_itlmic_wrapper(omp_offloading_t *off, long start_n, long length_n, REAL *x, REAL *y, REAL * result) {
    int sysid = off->dev->sysid;
    int num_cores = off->dev->num_cores;
    int i;
    /* BUGFIX: 'sum' must be zero-initialized. An OpenMP reduction combines
     * the threads' partial sums INTO the original value of the list item;
     * reading an uninitialized 'sum' is undefined behavior and yields a
     * garbage result. */
    REAL sum = 0.0;
#ifndef ITLMIC_COMBINED_OFFLOADING
#pragma offload target(mic:sysid) in (x: length(0) alloc_if(0) free_if(0)) \
                in (y: length(0) alloc_if(0) free_if(0)) out(sum)
#else
#pragma offload target(mic:sysid) in (x: length(length_n-start_n) align(64)) \
                inout (y: length(length_n-start_n) align(64)) out(sum)
#endif
    {
#pragma omp parallel for simd private(i) num_threads(num_cores) reduction(+:sum)
        for (i = 0; i < length_n-start_n; i++) {
            sum += x[i] * y[i];
        }
    }
    *result = sum;
}
#ifdef __cplusplus
}
#endif
|
mapping.c | #define _MODULE_MAPPING
#define USE_PREFETCH
#include <omp.h>
#ifdef USE_PREFETCH
#include <xmmintrin.h>
#endif
#include <limits.h>
#include "mapping.h"
#include "output.h"
#include "../common/sw-full-common.h"
#include "../common/sw-full-cs.h"
#include "../common/sw-full-ls.h"
#include "../common/sw-vector.h"
#include "../common/read_hit_heap.h"
#include "../common/sw-post.h"
/* Heap instantiations used throughout the mapping code. */
DEF_HEAP(uint32_t, uint, uu)
DEF_HEAP(double, struct read_hit_holder, unpaired)
DEF_HEAP(double, struct read_hit_pair_holder, paired)
/*
 * region_map entry bit layout (one entry per genome region, per
 * read-in-pair, per strand):
 *   bits 3..   : map id (generation counter identifying the current read pair)
 *   bits 1-2   : mate-pair kmer count; the value 0x3 (entry & 0x6 == 0x6)
 *                means "not yet computed" — see RG_VALID_MP_CNT
 *   bit  0     : set when >= 2 kmers of this read hit the region
 */
#define RG_GET_MAP_ID(c) ( (c) >> 3 )
/* Overwrites the whole entry: new id, mp-count marked invalid (0x6), has-2 clear. */
#define RG_SET_MAP_ID(c, id) (c) = ( (id) << 3 ) + 0x6;
#define RG_GET_HAS_2(c) ( ( (c) & 0x1 ) != 0 )
#define RG_SET_HAS_2(c) (c) |= 0x1
#define RG_VALID_MP_CNT(c) ( ( (c) & 0x6 ) != 0x6 )
#define RG_GET_MP_CNT(c) ( ( (c) >> 1 ) & 0x3 )
/* NOTE(review): expands to TWO statements — only safe inside a braced block;
 * never use as the body of an unbraced if/else. */
#define RG_SET_MP_CNT(c, cnt) (c) &= ~(0x6); (c) |= ( (cnt) << 1 )
/*
* Mapping routines
*/
/*
 * Extract the spaced-seed kmers of strand 'st' of read 're' and store their
 * genomemap indices in re->mapidx[st], laid out as
 * mapidx[sn * max_n_kmers + (kmer_pos - min_kmer_pos)] for each seed sn.
 * Allocates re->mapidx[st]; caller owns the buffer.
 */
static void
read_get_mapidxs_per_strand(struct read_entry * re, int st)
{
  int i, sn, load, base, r_idx;
  //uint32_t * kmerWindow = (uint32_t *)xcalloc(sizeof(kmerWindow[0]) * BPTO32BW(max_seed_span));
  /* sliding window holding the last max_seed_span bases (stack VLA,
   * replaces the earlier heap allocation) */
  uint32_t kmerWindow[BPTO32BW(max_seed_span)];
  assert(re != NULL);
  assert(re->mapidx[st] == NULL);
  //re->mapidx[st] = (uint32_t *)xmalloc(n_seeds * re->max_n_kmers * sizeof(re->mapidx[0][0]));
  re->mapidx[st] = (uint32_t *)
    my_malloc(n_seeds * re->max_n_kmers * sizeof(re->mapidx[0][0]),
              &mem_mapping, "mapidx [%s]", re->name);
  load = 0;  /* NOTE(review): incremented but never read below — looks vestigial */
  for (i = 0; i < re->read_len; i++) {
    /* shift the next base into the window */
    base = EXTRACT(re->read[st], i);
    bitfield_prepend(kmerWindow, max_seed_span, base);
    if (load < max_seed_span)
      load++;
    for (sn = 0; sn < n_seeds; sn++) {
      /* the window only holds a full kmer for seed sn once we've consumed
       * span bases past the first usable kmer position */
      if (i < re->min_kmer_pos + seed[sn].span - 1)
        continue;
      r_idx = i - seed[sn].span + 1;  /* read position where this kmer starts */
      re->mapidx[st][sn*re->max_n_kmers + (r_idx - re->min_kmer_pos)] = KMER_TO_MAPIDX(kmerWindow, sn);
    }
  }
  //free(kmerWindow);
}
/*
 * Extract spaced kmers from both strands of the read, saving them in
 * re->mapidx[0] and re->mapidx[1]. With DEBUG_KMERS defined, also dumps
 * every collapsed kmer of both strands to stderr.
 */
static inline void
read_get_mapidxs(struct read_entry * re)
{
  read_get_mapidxs_per_strand(re, 0);  /* forward strand */
  read_get_mapidxs_per_strand(re, 1);  /* reverse-complement strand */
#ifdef DEBUG_KMERS
  {
    uint sn, i, j;
    fprintf(stderr, "max_n_kmers:%u, min_kmer_pos:%u\n",
            re->max_n_kmers, re->min_kmer_pos);
    fprintf(stderr, "collapsed kmers from read:\n");
    for (sn = 0; sn < n_seeds; sn++) {
      fprintf(stderr, "sn:%u\n", sn);
      for (i = 0; re->min_kmer_pos + i + seed[sn].span <= re->read_len; i++) {
        fprintf(stderr, "\tpos:%u kmer:", re->min_kmer_pos + i);
        for (j = 0; j < seed[sn].weight; j++) {
          /* decode base j of the collapsed kmer (2 bits each, MSB first) */
          fprintf(stderr, "%c%s",
                  base_translate((re->mapidx[0][sn*re->max_n_kmers + i] >> 2*(seed[sn].weight - 1 - j)) & 0x3,
                                 shrimp_mode == MODE_COLOUR_SPACE),
                  j < seed[sn].weight - 1? "," : "\n");
        }
      }
    }
    fprintf(stderr, "collapsed kmers from read_rc:\n");
    for (sn = 0; sn < n_seeds; sn++) {
      fprintf(stderr, "sn:%u\n", sn);
      for (i = 0; re->min_kmer_pos + i + seed[sn].span <= re->read_len; i++) {
        fprintf(stderr, "\tpos:%u kmer:", re->min_kmer_pos + i);
        for (j = 0; j < seed[sn].weight; j++) {
          fprintf(stderr, "%c%s",
                  base_translate((re->mapidx[1][sn*re->max_n_kmers + i] >> 2*(seed[sn].weight - 1 - j)) & 0x3,
                                 shrimp_mode == MODE_COLOUR_SPACE),
                  j < seed[sn].weight - 1? "," : "\n");
        }
      }
    }
  }
#endif
}
/*
static int
bin_search(uint32_t * array, int l, int r, uint32_t value)
{
int m;
while (l + 1 < r) {
m = (l + r - 1)/2;
if (array[m] < value)
l = m + 1;
else
r = m + 1;
}
if (l < r && array[l] < value)
return l + 1;
else
return l;
}
static void
read_get_restricted_anchor_list_per_strand(struct read_entry * re, int st, bool collapse)
{
int i, j, offset, sn;
llint g_start, g_end;
int idx_start, idx_end, k;
uint32_t mapidx;
int anchor_cache[re->read_len];
uint diag;
int l;
assert(0); // unmaintained!!
assert(re->mapidx[st] != NULL);
re->n_anchors[st] = 0;
if ((st == 0 && !Fflag) || (st == 1 && !Cflag))
return;
for (j = 0; j < re->n_ranges; j++) {
if (re->ranges[j].st != st)
continue;
g_start = (llint)contig_offsets[re->ranges[j].cn] + re->ranges[j].g_start;
g_end = (llint)contig_offsets[re->ranges[j].cn] + re->ranges[j].g_end;
for (sn = 0; sn < n_seeds; sn++) {
for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) {
offset = sn*re->max_n_kmers + i;
mapidx = re->mapidx[st][offset];
idx_start = bin_search(genomemap[sn][mapidx], 0, (int)genomemap_len[sn][mapidx], g_start);
idx_end = bin_search(genomemap[sn][mapidx], idx_start, (int)genomemap_len[sn][mapidx], g_end + 1);
if (idx_start >= idx_end)
continue;
re->anchors[st] = (struct anchor *)xrealloc(re->anchors[st],
sizeof(re->anchors[0][0])
* (re->n_anchors[st] + (idx_end - idx_start)));
for (k = 0; idx_start + k < idx_end; k++) {
re->anchors[st][re->n_anchors[st] + k].cn = re->ranges[j].cn;
re->anchors[st][re->n_anchors[st] + k].x =
genomemap[sn][mapidx][idx_start + k] - contig_offsets[re->ranges[j].cn];
re->anchors[st][re->n_anchors[st] + k].y = re->min_kmer_pos + i;
re->anchors[st][re->n_anchors[st] + k].length = seed[sn].span;
re->anchors[st][re->n_anchors[st] + k].weight = 1;
}
re->n_anchors[st] += (int)(idx_end - idx_start);
}
}
}
qsort(re->anchors[st], re->n_anchors[st], sizeof(re->anchors[0][0]), anchor_uw_cmp);
if (collapse) {
for (i = 0; i < re->read_len; i++)
anchor_cache[i] = -1;
for (k = 0, i = 0; i < re->n_anchors[st]; i++) {
re->anchors[st][k] = re->anchors[st][i];
diag = (re->anchors[st][k].x + re->read_len - re->anchors[st][k].y) % re->read_len;
l = anchor_cache[diag];
if (l >= 0
&& re->anchors[st][l].cn == re->anchors[st][k].cn
&& anchor_uw_intersect(&re->anchors[st][l], &re->anchors[st][k])) {
anchor_uw_join(&re->anchors[st][l], &re->anchors[st][k]);
} else {
anchor_cache[diag] = k;
k++;
}
}
re->n_anchors[st] = k;
}
}
*/
#if defined (DEBUG_HIT_LIST_CREATION) || defined (DEBUG_HIT_LIST_PAIR_UP) \
|| defined (DEBUG_HIT_LIST_PASS1) || defined (DEBUG_HIT_LIST_PAIRED_HITS)
/* Debug helper: print one read_hit's fields (coords, scores, pairing
 * bounds, anchor) as a single line on stderr. Compiled only under the
 * DEBUG_HIT_LIST_* macros. */
static void
dump_hit(struct read_hit * hit) {
  fprintf(stderr, "(cn:%d,st:%d,gen_st:%d,g_off:%lld,w_len:%d,scores:(wg=%d,vc=%d,fl=%d,poster=%.5g),"
          "matches:%d,pair_min:%d,pair_max:%d,anchor:(x=%lld,y=%lld,ln=%d,wd=%d))\n",
          hit->cn, hit->st, hit->gen_st, hit->g_off_pos_strand, hit->w_len,
          hit->score_window_gen, hit->score_vector, hit->score_full, hit->sfrp != NULL? hit->sfrp->posterior : -1.0,
          hit->matches, hit->pair_min, hit->pair_max,
          hit->anchor.x, hit->anchor.y, hit->anchor.length, hit->anchor.width);
}
#endif
#if defined (DEBUG_HIT_LIST_CREATION) || defined (DEBUG_HIT_LIST_PAIR_UP) \
|| defined (DEBUG_HIT_LIST_PASS1)
/* Debug helper: dump the hit list of strand 'st' of read 're'.
 * only_paired skips hits without a paired mate (pair_min < 0);
 * only_after_vector skips hits not yet scored by the vector filter. */
static void
dump_hit_list(struct read_entry * re, int st, bool only_paired, bool only_after_vector)
{
  int i;
  for (i = 0; i < (int)re->n_hits[st]; i++) {
    if (only_paired && re->hits[st][i].pair_min < 0)
      continue;
    if (only_after_vector && re->hits[st][i].score_vector < 0)
      continue;
    dump_hit(&re->hits[st][i]);
  }
}
#endif
/*
 * Reverse a read hit.
 *
 * The 'st' strand of the read matches the 'gen_st' strand of the genome;
 * negate both flags and mirror the hit's genome window and anchor onto the
 * opposite strand of its contig.
 */
static inline void
reverse_hit(struct read_entry * re, struct read_hit * rh)
{
  assert(re != NULL && rh != NULL);
  /* mirror the genome window: new offset measured from the other end */
  rh->g_off = genome_len[rh->cn] - (rh->g_off + rh->w_len);
  anchor_reverse(&rh->anchor, rh->w_len, re->read_len);
  /* flip both strand indicators */
  rh->gen_st = 1 - rh->gen_st;
  rh->st = 1 - rh->st;
}
/*
 * Pair up the hits of two mated reads. For every hit i of re1 on strand st1,
 * find the (contiguous, since hit lists are sorted by contig and offset)
 * range [j, k) of re2's hits on the opposite strand whose genome offset lies
 * within [g_off + delta_g_off_min, g_off + delta_g_off_max] on the same
 * contig, and record the range in pair_min/pair_max on both sides.
 * Runs in O(n_hits1 + n_hits2) per strand thanks to the advancing 'j'.
 */
static void
readpair_pair_up_hits(struct read_entry * re1, struct read_entry * re2)
{
  int st1, st2, i, j, k, l;
  for (st1 = 0; st1 < 2; st1++) {
    st2 = 1 - st1; // opposite strand
    j = 0; // invariant: matching hit at index j or larger
    for (i = 0; i < re1->n_hits[st1]; i++) {
      // advance j past hits on a previous contig, or on the same contig but
      // before the minimum allowed mate offset
      while (j < re2->n_hits[st2]
             && (re2->hits[st2][j].cn < re1->hits[st1][i].cn // prev contig
                 || (re2->hits[st2][j].cn == re1->hits[st1][i].cn // same contig, but too close
                     && (int64_t)(re2->hits[st2][j].g_off ) < (int64_t)(re1->hits[st1][i].g_off) + (int64_t)re1->delta_g_off_min[st1]
                     )
                 )
             )
      {
        j++;
      }
      // extend k to the end of the window: same contig, offset within range
      k = j;
      while (k < re2->n_hits[st2]
             && re2->hits[st2][k].cn == re1->hits[st1][i].cn
             && (int64_t)(re2->hits[st2][k].g_off) <= (int64_t)(re1->hits[st1][i].g_off) + (int64_t)re1->delta_g_off_max[st1]
             )
      {
        k++;
      }
      if (j == k) {
        // no mate hit within the allowed offset window
        continue;
      }
      // re2's hits [j, k-1] all pair with re1's hit i; record on both sides
      re1->hits[st1][i].pair_min = j;
      re1->hits[st1][i].pair_max = k-1;
      for (l = j; l < k; l++) {
        if (re2->hits[st2][l].pair_min < 0) {
          re2->hits[st2][l].pair_min = i;
        }
        re2->hits[st2][l].pair_max = i;
      }
    }
  }
#ifdef DEBUG_HIT_LIST_PAIR_UP
#pragma omp critical (cs_stderr)
  {
    fprintf(stderr, "Dumping hit list after pair-up for read:[%s]\n", re1->name);
    dump_hit_list(re1, 0, false, false);
    dump_hit_list(re1, 1, false, false);
    fprintf(stderr, ".. and read:[%s]\n", re2->name);
    dump_hit_list(re2, 0, false, false);
    dump_hit_list(re2, 1, false, false);
  }
#endif
}
/*
 * Run the full Smith-Waterman filter on this hit.
 *
 * Orients the hit so the read's input strand is used, allocates rh->sfrp,
 * and fills it with the full-alignment result. In colour space the full CS
 * aligner runs directly; in letter space a vector pre-pass computes the
 * exact score first, and the full LS aligner only runs when that score
 * reaches 'thresh'. Finally caches score_full and the score percentage.
 */
static void
hit_run_full_sw(struct read_entry * re, struct read_hit * rh, int thresh)
{
  uint32_t * gen = NULL;
  assert(re != NULL && rh != NULL);
  /* flip the hit onto the read's input strand if needed, then pick the
   * matching (forward or reverse-complement) genome sequence */
  if (rh->st != re->input_strand) {
    reverse_hit(re, rh);
  }
  if (rh->gen_st == 0) {
    gen = genome_contigs[rh->cn];
  } else {
    gen = genome_contigs_rc[rh->cn];
  }
  // allocate sfrp struct
  assert(rh->sfrp == NULL);
  rh->sfrp = (struct sw_full_results *)my_calloc(sizeof(rh->sfrp[0]), &mem_mapping, "sfrp [%s]", re->name);
  rh->sfrp->in_use = false;
  rh->sfrp->mqv = 255; // unavailable
#ifdef DEBUG_SW_FULL_CALLS
  fprintf(stderr, "SW full call: (name:[%s],cn:%d,st:%d,gen_st:%d,g_off:%lld,w_len:%d,anchor:(%lld,%lld,%d,%d))\n",
          re->name, rh->cn, rh->st, rh->gen_st, rh->g_off, rh->w_len,
          rh->anchor.x, rh->anchor.y, rh->anchor.length, rh->anchor.width);
#endif
  if (shrimp_mode == MODE_COLOUR_SPACE) {
    sw_full_cs(gen, rh->g_off, rh->w_len,
               re->read[rh->st], re->read_len, re->initbp[rh->st],
               thresh, rh->sfrp, rh->gen_st && Tflag, genome_is_rna,
               &rh->anchor, 1,Gflag ? 0 : 1, re->crossover_score);
  } else {
    /*
     * The full SW in letter space assumes it's given the correct max score.
     * This might not be true just yet if we're using hashing&caching because
     * of possible hash collisions.
     */
    rh->score_vector = sw_vector(gen, rh->g_off, rh->w_len,
                                 re->read[rh->st], re->read_len,
                                 NULL, -1, genome_is_rna);
    if (rh->score_vector >= thresh) {
      sw_full_ls(gen, rh->g_off, rh->w_len,
                 re->read[rh->st], re->read_len,
                 thresh, rh->score_vector, rh->sfrp, rh->gen_st && Tflag,
                 &rh->anchor, 1, Gflag ? 0 : 1);
      //assert(rh->sfrp->score == rh->score_vector);
    } else { // this wouldn't have passed the filter
      rh->sfrp->score = 0;
    }
  }
  rh->score_full = rh->sfrp->score;
  /* percentage of max score, scaled by 1000 for integer precision */
  rh->pct_score_full = (1000 * 100 * rh->score_full)/rh->score_max;
}
int
get_insert_size(struct read_hit *rh, struct read_hit * rh_mp)
{
if (rh_mp == NULL || rh == NULL || rh->cn != rh_mp->cn) {
return 0;
}
//get the mate pair info
int read_start_mp = rh_mp->sfrp->read_start+1; //1based
int read_end_mp = read_start_mp + rh_mp->sfrp->rmapped -1; //1base
int genome_length_mp = genome_len[rh_mp->cn];
int genome_start_mp;
bool reverse_strand_mp = (rh_mp->gen_st ==1);
if (!reverse_strand_mp) {
genome_start_mp = rh_mp->sfrp->genome_start+1; // 0 based -> 1 based
} else {
int genome_right_most_coordinate = genome_length_mp - rh_mp->sfrp->genome_start;
//rh->sfrp->deletions is deletions in the reference
// This is when the read has extra characters that dont match into ref
genome_start_mp = genome_right_most_coordinate - (read_end_mp - read_start_mp - rh_mp->sfrp->deletions + rh_mp->sfrp->insertions);
}
int genome_end_mp=genome_start_mp+rh_mp->sfrp->gmapped-1;
//get the other hit info
int genome_start;
bool reverse_strand=(rh->gen_st == 1);
if (!reverse_strand) {
genome_start = rh->sfrp->genome_start+1; // 0 based -> 1 based
} else {
int read_start = rh->sfrp->read_start+1; //1based
int read_end = read_start + rh->sfrp->rmapped -1; //1base
int genome_length = genome_len[rh->cn];
int genome_right_most_coordinate = genome_length - rh->sfrp->genome_start;
//rh->sfrp->deletions is deletions in the reference
// This is when the read has extra characters that dont match into ref
genome_start = genome_right_most_coordinate - (read_end - read_start - rh->sfrp->deletions + rh->sfrp->insertions);
}
int genome_end=genome_start+rh->sfrp->gmapped-1;
int fivep = 0;
int fivep_mp = 0;
if (reverse_strand){
fivep = genome_end;
} else {
fivep = genome_start - 1;
}
if (reverse_strand_mp){
fivep_mp = genome_end_mp;
} else {
fivep_mp = genome_start_mp-1;
}
return (fivep_mp - fivep);
}
/*
 * Record one kmer hit in the global region_map for (nip, st, region):
 * if the entry already belongs to the current generation (region_map_id),
 * mark the region as having >= 2 kmers; otherwise reset it to this
 * generation. Factored out of read_get_region_counts(), which previously
 * contained this block twice (marked "BEGIN COPY"/"BEGIN PASTE").
 */
static inline void
region_mark_kmer_hit(int nip, int st, int region)
{
  if (RG_GET_MAP_ID(region_map[nip][st][region]) == region_map_id) {
    /* a previous kmer set it, so there are >=2 kmers in this region */
    RG_SET_HAS_2(region_map[nip][st][region]);
  } else {
    region_map[nip][st][region] = 0; /* clear old entry */
    RG_SET_MAP_ID(region_map[nip][st][region], region_map_id); /* set new id */
  }
}

/*
 * Walk every genomemap list hit by a kmer of strand 'st' of read 're'
 * (skipping over-represented kmers, i.e. lists longer than list_cutoff) and
 * mark the corresponding genome regions in region_map. Positions within
 * region_overlap of a region's start also count toward the previous region.
 * When the generation counter wraps to 0, all four region maps are
 * reallocated (zeroed) so stale ids cannot collide.
 * Also fixes the mojibake "&region_map" that had been corrupted in the
 * prefetch call.
 */
static void
read_get_region_counts(struct read_entry * re, int st, struct regions_options * options)
{
  int sn, i, offset, region;
  uint j;
  TIME_COUNTER_START(tpg.region_counts_tc);
  int number_in_pair = re->first_in_pair? 0 : 1;
  assert(use_regions);
  if (region_map_id == 0) {
    /* id counter wrapped: start a fresh generation with zeroed maps */
    region_map_id = 1;
    for (int _nip = 0; _nip < 2; _nip++) {
      for (int _st = 0; _st < 2; _st++) {
        my_free(region_map[_nip][_st], n_regions * sizeof(region_map[0][0][0]),
                &mem_mapping, "region_map");
        region_map[_nip][_st] = (region_map_t *)
          my_calloc(n_regions * sizeof(region_map[0][0][0]),
                    &mem_mapping, "region_map");
      }
    }
  }
  assert(region_map[0][0] != NULL);
  for (sn = 0; sn <= n_seeds - 1; sn++) {
    for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) {
      offset = sn*re->max_n_kmers + i;
      if (genomemap_len[sn][re->mapidx[st][offset]] > list_cutoff)
        continue;
      for (j = 0; j < genomemap_len[sn][re->mapidx[st][offset]]; j++) {
#ifdef USE_PREFETCH
        if (j + 4 < genomemap_len[sn][re->mapidx[st][offset]]) {
          int region_ahead = (int)(genomemap[sn][re->mapidx[st][offset]][j + 4] >> region_bits);
          _mm_prefetch((char *)&region_map[number_in_pair][st][region_ahead], _MM_HINT_T0);
        }
#endif
        region = (int)(genomemap[sn][re->mapidx[st][offset]][j] >> region_bits);
        region_mark_kmer_hit(number_in_pair, st, region);
        /* extend regions by region_overlap: a hit near the start of a
         * region also counts toward the previous region */
        if ((genomemap[sn][re->mapidx[st][offset]][j] & ((1 << region_bits) - 1)) < (uint)region_overlap && region > 0) {
          region_mark_kmer_hit(number_in_pair, st, region - 1);
        }
      }
    }
  }
  TIME_COUNTER_STOP(tpg.region_counts_tc);
}
/*
 * If the mate-pair count of region_map[nip][st][region] has not been
 * computed yet this generation, scan the mate's region window
 * [region + delta_region_min, region + delta_region_max] (opposite read,
 * opposite strand) and cache the best kmer count found (capped at 2).
 * Factored out of read_get_mp_region_counts(), which previously contained
 * this block twice verbatim.
 */
static inline void
region_set_mp_cnt(struct read_entry * re, int nip, int st, int region)
{
  if (RG_VALID_MP_CNT(region_map[nip][st][region]))
    return;
  int first = MAX(0, region + re->delta_region_min[st]);
  int last = MIN(n_regions - 1, region + re->delta_region_max[st]);
  int max = 0;
  /* stop early once a 2-kmer region is found (counts saturate at 2) */
  for (int k = first; k <= last && max < 2; k++) {
    if (RG_GET_MAP_ID(region_map[1-nip][1-st][k]) == region_map_id) {
      max = (RG_GET_HAS_2(region_map[1-nip][1-st][k]) ? 2 : 1);
    }
  }
  RG_SET_MP_CNT(region_map[nip][st][region], max);
}

/*
 * For every region hit by a kmer of strand 'st' of read 're', compute and
 * cache the mate-pair region count (how many kmers the mate has in the
 * allowed insert-size window). Positions within region_overlap of a
 * region's start also update the previous region.
 * Also fixes the mojibake "&region_map" corrupted in the prefetch calls.
 */
static void
read_get_mp_region_counts(struct read_entry * re, int st)
{
  TIME_COUNTER_START(tpg.mp_region_counts_tc);
  int nip, sn, i, offset, region;
  unsigned int j;
  nip = re->first_in_pair? 0 : 1;
  for (sn = 0; sn < n_seeds; sn++) {
    for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) {
      offset = sn*re->max_n_kmers + i;
      if (genomemap_len[sn][re->mapidx[st][offset]] > list_cutoff)
        continue;
      for (j = 0; j < genomemap_len[sn][re->mapidx[st][offset]]; j++) {
#ifdef USE_PREFETCH
        if (j + 4 < genomemap_len[sn][re->mapidx[st][offset]]) {
          int region_ahead = (int)(genomemap[sn][re->mapidx[st][offset]][j + 4] >> region_bits);
          _mm_prefetch((char *)&region_map[nip][st][region_ahead], _MM_HINT_T0);
          _mm_prefetch((char *)&region_map[1-nip][1-st][region_ahead], _MM_HINT_T0);
        }
#endif
        region = (int)(genomemap[sn][re->mapidx[st][offset]][j] >> region_bits);
        region_set_mp_cnt(re, nip, st, region);
        /* extend to the previous region for hits in the overlap zone */
        if (region > 0
            && (genomemap[sn][re->mapidx[st][offset]][j] & ((1 << region_bits) - 1)) < (uint)region_overlap) {
          region_set_mp_cnt(re, nip, st, region - 1);
        }
      }
    }
  }
  TIME_COUNTER_STOP(tpg.mp_region_counts_tc);
}
/*
static void
read_get_mp_region_counts_per_strand(struct read_entry * re, int st, struct read_entry * re_mp,
struct regions_options * options)
{
int i, first, last, j, max;
for (i = 0; i < n_regions; i++) {
max = 0;
first = i + re->delta_region_min[st];
if (first < 0)
first = 0;
last = i + re->delta_region_max[st];
if (last > n_regions - 1)
last = n_regions - 1;
for (j = first; j <= last; j++) {
if (re_mp->region_map[1-st][0][j] == re_mp->region_map_id
&& re_mp->region_map[1-st][1][j] > max)
max = re_mp->region_map[1-st][1][j];
}
re->region_map[st][2][i] = max;
}
}
static inline void
read_get_mp_region_counts(struct read_entry * re, struct read_entry * re_mp,
struct regions_options * options)
{
read_get_mp_region_counts_per_strand(re, 0, re_mp, options);
read_get_mp_region_counts_per_strand(re, 1, re_mp, options);
}
*/
/*
 * Advance *idx through the genomemap list 'map' (of length max_idx),
 * skipping entries whose genome region lacks sufficient kmer support; stop
 * at the first entry worth keeping (or at the end). Every skipped entry is
 * counted in *anchors_discarded.
 *
 * Keep criteria, per options->use_mp_region_counts:
 *   0: keep if the region (or its overlap-extended neighbor) has >= 2 kmers
 *      of this read;
 *   1: keep if this read has >= 2 kmers AND the mate has >= 2;
 *   2: keep if this read has >= 2 kmers OR the mate has >= 2;
 *   3: keep if the mate has >= 1 and the combined count is >= 3.
 * Assumes region_map was populated for the current generation
 * (asserted below).
 *
 * Cleanup: removed two large commented-out dead-code hunks (superseded by
 * read_get_mp_region_counts) and fixed the mojibake "&region_map" in the
 * prefetch call; live logic is unchanged.
 */
static inline void
advance_index_in_genomemap(struct read_entry * re, int st,
                           struct anchor_list_options * options,
                           uint * idx, uint max_idx, uint32_t * map, int * anchors_discarded)
{
  int nip = re->first_in_pair? 0 : 1;
  int count_main, count_mp;
  while (*idx < max_idx) {
#ifdef USE_PREFETCH
    if (*idx + 2 < max_idx) {
      int region_ahead = (int)(map[*idx + 2] >> region_bits);
      _mm_prefetch((char *)&region_map[nip][st][region_ahead], _MM_HINT_T0);
    }
#endif
    int region = (int)(map[*idx] >> region_bits);
    assert(RG_GET_MAP_ID(region_map[nip][st][region]) == region_map_id);
    if (options->use_mp_region_counts != 0)
    {
      /* combine this read's count with the cached mate-pair count */
      count_main = (RG_GET_HAS_2(region_map[nip][st][region]) ? 2 : 1);
      count_mp = RG_GET_MP_CNT(region_map[nip][st][region]);
      if ((options->use_mp_region_counts == 1 && (count_main >= 2 && count_mp >= 2))
          || (options->use_mp_region_counts == 2 && (count_main >= 2 || count_mp >= 2))
          || (options->use_mp_region_counts == 3 && (count_mp >= 1 && (count_main + count_mp) >= 3))
          )
        break;
      /* also accept support from the previous region when the hit lies in
       * the overlap zone at the start of its region */
      if (region > 0
          && (map[*idx] & ((1 << region_bits) - 1)) < (uint)region_overlap) {
        region--;
        count_main = (RG_GET_HAS_2(region_map[nip][st][region]) ? 2 : 1);
        count_mp = RG_GET_MP_CNT(region_map[nip][st][region]);
        if ((options->use_mp_region_counts == 1 && (count_main >= 2 && count_mp >= 2))
            || (options->use_mp_region_counts == 2 && (count_main >= 2 || count_mp >= 2))
            || (options->use_mp_region_counts == 3 && (count_mp >= 1 && (count_main + count_mp) >= 3))
            )
          break;
      }
    }
    else // don't use mp counts at all
    {
      if (RG_GET_HAS_2(region_map[nip][st][region]))
        break;
      if (region > 0
          && (map[*idx] & ((1 << region_bits) - 1)) < (uint)region_overlap) {
        region--;
        if (RG_GET_HAS_2(region_map[nip][st][region]))
          break;
      }
    }
    (*anchors_discarded)++;
    (*idx)++;
  }
}
/*
static void
expand_anchor(read_entry * re, int st, anchor * a)
{
uint32_t * db = (shrimp_mode == MODE_LETTER_SPACE? genome_contigs[a->cn] : genome_cs_contigs[a->cn]);
uint32_t * qr = re->read[st];
llint x = (llint)a->x - (llint)contig_offsets[a->cn];
int y = a->y;
int i, sc;
int mm_score = (shrimp_mode == MODE_LETTER_SPACE? mismatch_score : match_score + crossover_score);
// first, compute the real number of matches
assert(x + a->length <= genome_len[a->cn]);
assert(y + a->length <= re->read_len);
sc = 0;
for (i = 0; i < a->length; i++) {
sc += (EXTRACT(db, x + (llint)i) == EXTRACT(qr, y + i) ? match_score : mm_score);
}
// extend backward
int min_i = 0;
int max_sc = sc;
for (i = -1; x + i >= 0 && y + i >= 0; i--) {
sc += (EXTRACT(db, x + (llint)i) == EXTRACT(qr, y + i) ? match_score : mm_score);
if (sc < 0 || sc < max_sc + 5 * mm_score) break;
if (sc > max_sc) {
min_i = i;
max_sc = sc;
}
}
// extend forward
int max_i = a->length - 1;
for (i = max_i + 1; x + i < genome_len[a->cn] && y + i < re->read_len; i++) {
sc += (EXTRACT(db, x + i) == EXTRACT(qr, y + i) ? match_score : mm_score);
if (sc < 0 || sc < max_sc + 5 * mm_score) break;
if (sc > max_sc) {
max_i = i;
max_sc = sc;
}
}
// adjust anchor
a->x += min_i;
a->y += min_i;
a->length = max_i - min_i + 1;
//if (a->length > 20) a->weight++;
a->score = max_sc;
assert(a->score <= a->length * match_score && a->score >= a->length * mm_score);
}
*/
/*
 * Build the anchor list for strand 'st' of read 're': merge all genomemap
 * lists hit by the read's kmers into one genome-position-sorted list of
 * anchors via a min-heap (k-way merge), optionally filtering by region
 * counts and collapsing co-linear anchors on the same diagonal.
 * Allocates re->anchors[st] (shrunk to fit at the end) and sets
 * re->n_anchors[st].
 */
static void
read_get_anchor_list_per_strand(struct read_entry * re, int st,
                                struct anchor_list_options * options)
{
  uint list_sz;
  uint offset;
  int i, sn;
  //uint * idx;
  struct heap_uu h;
  struct heap_uu_elem tmp;
  int anchor_cache[re->read_len];   /* last anchor index seen per diagonal */
  int anchors_discarded = 0;
  int big_gaps = 0;
  assert(re != NULL && options != NULL);
  assert(re->anchors[st] == NULL && re->n_anchors[st] == 0);
  if (re->mapidx[st] == NULL)
    return;
  /* skip strands disabled by the F/C command-line flags */
  if ((st == 0 && !Fflag) || (st == 1 && !Cflag))
    return;
  // compute estimate size of anchor list
  list_sz = 0;
  for (sn = 0; sn < n_seeds; sn++) {
    for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) {
      offset = sn*re->max_n_kmers + i;
      if (genomemap_len[sn][re->mapidx[st][offset]] > list_cutoff)
        continue;
      list_sz += genomemap_len[sn][re->mapidx[st][offset]];
    }
  }
  stat_add(&tpg.anchor_list_init_size, list_sz);
  // init anchor list
  re->anchors[st] = (struct anchor *)
    my_malloc(list_sz * sizeof(re->anchors[0][0]),
              &mem_mapping, "anchors [%s]", re->name);
  // init min heap, indices in genomemap lists, and anchor_cache
  heap_uu_init(&h, n_seeds * re->max_n_kmers);
  /* idx[offset] = next unread position in each genomemap list (stack VLA) */
  uint32_t idx[n_seeds * re->max_n_kmers];
  memset(idx, 0, n_seeds * re->max_n_kmers * sizeof(idx[0]));
  for (i = 0; i < re->read_len; i++)
    anchor_cache[i] = -1;
  // load inital anchors in min heap
  for (sn = 0; sn < n_seeds; sn++) {
    for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) {
      offset = sn*re->max_n_kmers + i;
      if (genomemap_len[sn][re->mapidx[st][offset]] > list_cutoff) {
        /* over-represented kmer: mark its list as fully consumed */
        idx[offset] = genomemap_len[sn][re->mapidx[st][offset]];
      }
      if (options->use_region_counts) {
        advance_index_in_genomemap(re, st, options,
                                   &idx[offset], genomemap_len[sn][re->mapidx[st][offset]],
                                   genomemap[sn][re->mapidx[st][offset]],
                                   &anchors_discarded);
      }
      if (idx[offset] < genomemap_len[sn][re->mapidx[st][offset]]) {
        tmp.key = genomemap[sn][re->mapidx[st][offset]][idx[offset]];
        tmp.rest = offset;
        heap_uu_insert(&h, &tmp);
        idx[offset]++;
      }
    }
  }
  /* k-way merge: repeatedly take the smallest genome position */
  while (h.load > 0) {
    // extract min
    heap_uu_get_min(&h, &tmp);
    // add to anchor list
    offset = tmp.rest;
    sn = offset / re->max_n_kmers;
    i = offset % re->max_n_kmers;
    re->anchors[st][re->n_anchors[st]].x = tmp.key;
    re->anchors[st][re->n_anchors[st]].y = re->min_kmer_pos + i;
    re->anchors[st][re->n_anchors[st]].length = seed[sn].span;
    re->anchors[st][re->n_anchors[st]].width = 1;
    re->anchors[st][re->n_anchors[st]].weight = 1;
    get_contig_num(re->anchors[st][re->n_anchors[st]].x, &re->anchors[st][re->n_anchors[st]].cn);
    if (re->n_anchors[st] > 0 && tmp.key - re->anchors[st][re->n_anchors[st] - 1].x >= anchor_list_big_gap)
      big_gaps++;
    re->n_anchors[st]++;
    if (options->collapse) {
      // check if current anchor intersects the cached one on the same diagonal
      uint diag = (re->anchors[st][re->n_anchors[st]-1].x + re->read_len - re->anchors[st][re->n_anchors[st]-1].y) % re->read_len;
      int j = anchor_cache[diag];
      if (j >= 0
          && re->anchors[st][j].cn == re->anchors[st][re->n_anchors[st]-1].cn
          //&& anchor_uw_intersect(&re->anchors[st][j], &re->anchors[st][re->n_anchors[st]-1])) {
          && anchor_uw_colinear(&re->anchors[st][j], &re->anchors[st][re->n_anchors[st]-1])) {
        /* merge into the cached anchor instead of keeping a new one */
        anchor_uw_join(&re->anchors[st][j], &re->anchors[st][re->n_anchors[st]-1]);
        re->n_anchors[st]--;
      } else {
        anchor_cache[diag] = re->n_anchors[st]-1;
      }
    }
    if (options->use_region_counts) {
      advance_index_in_genomemap(re, st, options,
                                 &idx[offset], genomemap_len[sn][re->mapidx[st][offset]],
                                 genomemap[sn][re->mapidx[st][offset]],
                                 &anchors_discarded);
    }
    // load next anchor for that seed/mapidx
    if (idx[offset] < genomemap_len[sn][re->mapidx[st][offset]]) {
      tmp.key = genomemap[sn][re->mapidx[st][offset]][idx[offset]];
      tmp.rest = offset;
      heap_uu_replace_min(&h, &tmp);
      idx[offset]++;
    } else {
      heap_uu_extract_min(&h, &tmp);
    }
  }
  heap_uu_destroy(&h);
  /* shrink allocation to the anchors actually kept */
  re->anchors[st] = (struct anchor *)
    my_realloc(re->anchors[st], re->n_anchors[st] * sizeof(re->anchors[0][0]), list_sz * sizeof(re->anchors[0][0]),
               &mem_mapping, "anchors [%s]", re->name);
  stat_add(&tpg.n_anchors_discarded, anchors_discarded);
  stat_add(&tpg.n_big_gaps_anchor_list, big_gaps);
}
/*
 * Build the anchor lists for both strands of a read, accounting the time
 * spent under tpg.anchor_list_tc.
 */
static inline void
read_get_anchor_list(struct read_entry * re, struct anchor_list_options * options)
{
  int strand;
  TIME_COUNTER_START(tpg.anchor_list_tc);
  for (strand = 0; strand < 2; strand++)
    read_get_anchor_list_per_strand(re, strand, options);
  TIME_COUNTER_STOP(tpg.anchor_list_tc);
}
/*
 * Build the list of candidate mapping windows ("hits") for one strand of a
 * read, from its anchor list.
 *
 * For each anchor i, scan backwards over earlier anchors j that fall inside
 * the same genome window and estimate the gapped score of pairing (j, i).
 * If the best pair score passes the window-generation threshold — or the
 * matching mode (gapless / n=1 / heavy mate-pair region) waives that check —
 * emit a read_hit describing a window around the anchor pair.  The hit list
 * is then insertion-sorted by (contig, g_off) and shrunk to its used size.
 *
 * re:      read entry; consumes re->anchors[st], fills re->hits[st] and
 *          re->n_hits[st] (asserted empty on entry)
 * st:      strand index (0 or 1)
 * options: match_mode, gapless flag and window-generation threshold
 */
static void
read_get_hit_list_per_strand(struct read_entry * re, int st, struct hit_list_options * options)
{
  llint goff, gstart, gend;
  int max_score, tmp_score = 0;
  int i, j, cn, max_idx;
  int w_len;
  int short_len = 0, long_len = 0;
  int heavy_mp = false; // init not needed
  int gap_open_score, gap_extend_score;
  struct anchor a[3];
  assert(re != NULL && options != NULL);
  assert(re->hits[st] == NULL && re->n_hits[st] == 0);
  if (re->n_anchors[st] == 0)
    return;
  // worst case: one hit per anchor; trimmed by my_realloc at the end
  //re->hits[st] = (struct read_hit *)xcalloc(re->n_anchors[st] * sizeof(re->hits[0][0]));
  re->hits[st] = (struct read_hit *)
    my_calloc(re->n_anchors[st] * sizeof(re->hits[0][0]),
              &mem_mapping, "hits [%s]", re->name);
  for (i = 0; i < re->n_anchors[st]; i++) {
    // contig num of crt anchor
    cn = re->anchors[st][i].cn;
    // w_len: window length, clipped to the contig length
    w_len = re->window_len;
    if ((uint32_t)w_len > genome_len[cn])
      w_len = (int)genome_len[cn];
    // set gstart and gend: genome span (contig-relative) a window ending
    // at this anchor could cover
    gend = (re->anchors[st][i].x - contig_offsets[cn]) + re->read_len - 1 - re->anchors[st][i].y;
    if (gend > genome_len[cn] - 1) {
      gend = genome_len[cn] - 1;
    }
    if (gend >= re->window_len) {
      gstart = gend - re->window_len;
    } else {
      gstart = 0;
    }
    /*
     * Modes of matching:
     * - gapless: only extend current anchor; no window_gen_threshold check
     * - n=1: one kmer match & no window_gen_threshold check; or at least two matches
     * - n=2: at least two kmer matches & window_gen_threshold check
     * - n=3: one kmer & >=2 kmers for mp
     */
    max_idx = i;
    //if (!hack)
    max_score = re->anchors[st][i].length * match_score;
    //else
    //  max_score = re->anchors[st][i].score;
    if (options->match_mode == 3) {
      // n=3: a single kmer suffices if the mate-pair already has >= 2 kmer
      // hits in this region (or the preceding one, if inside the overlap)
      int region = re->anchors[st][i].x >> region_bits;
      assert(RG_VALID_MP_CNT(region_map[re->first_in_pair? 0 : 1][st][region]));
      heavy_mp = (RG_GET_MP_CNT(region_map[re->first_in_pair? 0 : 1][st][region]) >= 2);
      if (!heavy_mp && region > 0
          && (re->anchors[st][i].x & ((1 << region_bits) - 1)) < (uint)region_overlap) {
        region--;
        assert(RG_VALID_MP_CNT(region_map[re->first_in_pair? 0 : 1][st][region]));
        heavy_mp = (RG_GET_MP_CNT(region_map[re->first_in_pair? 0 : 1][st][region]) >= 2);
      }
    }
    if (!options->gapless) {
      // avoid single matches when n=2
      if ((options->match_mode == 2
           || (options->match_mode == 3 && !heavy_mp))
          && re->anchors[st][i].weight == 1) // && !hack)
        max_score = -1;
      // scan earlier anchors within the current window for the best partner
      for (j = i - 1;
           j >= 0
             && re->anchors[st][j].x >= (llint)contig_offsets[cn] + gstart;
           j--) {
        // partner must be strictly earlier in the read as well
        if (re->anchors[st][j].y >= re->anchors[st][i].y) {
          continue;
        }
        //if (hack
        //    && re->anchors[st][j].x == re->anchors[st][i].x
        //    && re->anchors[st][j].y == re->anchors[st][i].y)
        //  continue;
        // compare diagonals to classify the gap between j and i
        if (re->anchors[st][i].x - (llint)contig_offsets[cn] - re->anchors[st][i].y
            > re->anchors[st][j].x - (llint)contig_offsets[cn] - re->anchors[st][j].y)
        { // deletion in read
          short_len = (int)(re->anchors[st][i].y - re->anchors[st][j].y) + re->anchors[st][i].length;
          long_len = (int)(re->anchors[st][i].x - re->anchors[st][j].x) + re->anchors[st][i].length;
          gap_open_score = a_gap_open_score;
          gap_extend_score = a_gap_extend_score;
        }
        else
        { // insertion in read
          short_len = (int)(re->anchors[st][i].x - re->anchors[st][j].x) + re->anchors[st][i].length;
          long_len = (int)(re->anchors[st][i].y - re->anchors[st][j].y) + re->anchors[st][i].length;
          gap_open_score = b_gap_open_score;
          gap_extend_score = b_gap_extend_score;
        }
        assert(long_len >= short_len);
        // NOTE(review): the computed gap_open_score/gap_extend_score are not
        // used below; the active score formula always uses the b_* penalties
        // (the variant using them is in the disabled "hack" branch)
        //if (!hack) {
        if (long_len > short_len) {
          tmp_score = short_len * match_score + b_gap_open_score
            + (long_len - short_len) * b_gap_extend_score;
        } else {
          tmp_score = short_len * match_score;
        }
        //} else {
        //  int missing_matches = abs(re->anchors[st][i].length + re->anchors[st][j].length - short_len);
        //  tmp_score = re->anchors[st][i].score + re->anchors[st][j].score
        //    + MAX(missing_matches - 5, 0) * (shrimp_mode == MODE_LETTER_SPACE? mismatch_score : match_score + crossover_score);
        //  if (long_len > short_len)
        //    tmp_score += gap_open_score + (long_len - short_len) * gap_extend_score;
        //}
        if (tmp_score > max_score) {
          max_idx = j;
          max_score = tmp_score;
        }
      }
    }
    if (options->gapless
        || options->match_mode == 1
        || (options->match_mode == 3 && heavy_mp)
        || max_score >= (int)abs_or_pct(options->threshold,
                                        (re->read_len < w_len ? re->read_len : w_len) * match_score))
    {
      // set goff: center the window on the anchor pair, clipped to the contig
      int x_len = (int)(re->anchors[st][i].x - re->anchors[st][max_idx].x) + re->anchors[st][i].length;
      if ((re->window_len - x_len)/2 < re->anchors[st][max_idx].x - contig_offsets[cn]) {
        goff = (re->anchors[st][max_idx].x - contig_offsets[cn]) - (re->window_len - x_len)/2;
      } else {
        goff = 0;
      }
      if (goff + w_len > genome_len[cn]) {
        goff = genome_len[cn] - w_len;
      }
      // compute anchor: join the pair (or re-base the single anchor) to
      // window-relative coordinates
      if (max_idx < i) {
        a[0] = re->anchors[st][i];
        anchor_to_relative(&a[0], contig_offsets[cn] + goff);
        a[1] = re->anchors[st][max_idx];
        anchor_to_relative(&a[1], contig_offsets[cn] + goff);
        anchor_join(a, 2, &a[2]);
      } else {
        a[2] = re->anchors[st][i];
        anchor_to_relative(&a[2], contig_offsets[cn] + goff);
      }
      // add hit
      re->hits[st][re->n_hits[st]].g_off = goff;
      re->hits[st][re->n_hits[st]].g_off_pos_strand = goff;
      re->hits[st][re->n_hits[st]].w_len = w_len;
      re->hits[st][re->n_hits[st]].cn = cn;
      re->hits[st][re->n_hits[st]].st = st;
      re->hits[st][re->n_hits[st]].gen_st = 0;
      re->hits[st][re->n_hits[st]].anchor = a[2];
      re->hits[st][re->n_hits[st]].score_window_gen = max_score;
      re->hits[st][re->n_hits[st]].matches = (options->gapless || max_idx == i?
                                              re->anchors[st][i].weight :
                                              re->anchors[st][i].weight + re->anchors[st][max_idx].weight);
      // full/vector scores filled in later by pass1/pass2
      re->hits[st][re->n_hits[st]].score_vector = -1;
      re->hits[st][re->n_hits[st]].score_full = -1;
      re->hits[st][re->n_hits[st]].score_max = (re->read_len < w_len? re->read_len : w_len) * match_score;
      re->hits[st][re->n_hits[st]].pair_min = -1;
      re->hits[st][re->n_hits[st]].pair_max = -1;
      re->hits[st][re->n_hits[st]].mapping_quality = 255;
      re->hits[st][re->n_hits[st]].saved = 0;
      re->hits[st][re->n_hits[st]].paired_hit_idx = NULL;
      re->hits[st][re->n_hits[st]].n_paired_hit_idx = 0;
      re->n_hits[st]++;
    }
  }
  // sort list (there might be few misordered pairs because of the goff computation)
  for (i = 1; i < re->n_hits[st]; i++) {
    j = i;
    while (j >= 1
           && re->hits[st][j-1].cn == re->hits[st][i].cn
           && re->hits[st][j-1].g_off > re->hits[st][i].g_off)
      j--;
    if (j < i) { // shift elements at indexes j..i-1 higher
      struct read_hit tmp = re->hits[st][i];
      int k;
      for (k = i - 1; k >= j; k--)
        re->hits[st][k+1] = re->hits[st][k];
      re->hits[st][j] = tmp;
    }
  }
  // shrink the hits array to the number of hits actually produced
  re->hits[st] = (struct read_hit *)
    my_realloc(re->hits[st], re->n_hits[st] * sizeof(re->hits[0][0]), re->n_anchors[st] * sizeof(re->hits[0][0]),
               &mem_mapping, "hits [%s]", re->name);
}
/*
 * Build hit lists for both strands of a read, then assign each hit a global
 * sort index: strand-0 hits get 0..n_hits[0]-1, strand-1 hits continue from
 * n_hits[0].  The whole operation is timed with the hit-list counter.
 */
static inline void
read_get_hit_list(struct read_entry * re, struct hit_list_options * options)
{
  TIME_COUNTER_START(tpg.hit_list_tc);

  int strand;
  for (strand = 0; strand < 2; strand++) {
    read_get_hit_list_per_strand(re, strand, options);
  }

  // assign sort indexes, consecutive across the two strands
  int next_idx = 0;
  for (strand = 0; strand < 2; strand++) {
    for (int k = 0; k < re->n_hits[strand]; k++) {
      re->hits[strand][k].sort_idx = next_idx;
      next_idx++;
    }
  }

  TIME_COUNTER_STOP(tpg.hit_list_tc);
#ifdef DEBUG_HIT_LIST_CREATION
  fprintf(stderr, "Dumping hit list after creation for read:[%s]\n", re->name);
  dump_hit_list(re, 0, false, false);
  dump_hit_list(re, 1, false, false);
#endif
}
/*
 * Pass 1 over the hit list of one strand: run the vector (SIMD) filter on
 * each hit that survives the cheap checks, recording score_vector and
 * pct_score_vector.
 *
 * Hits are skipped (score_vector left/set as noted) when:
 *  - only_paired is set and the hit has no pairing partner;
 *  - the hit has fewer kmer matches than min_matches;
 *  - the hit is already saved (it also updates the last-good window);
 *  - the hit's window overlaps the last window that scored above threshold
 *    by more than window_overlap (score_vector is set to 0).
 */
static void
read_pass1_per_strand(struct read_entry * re, int st, struct pass1_options * options)
{
  int i;
  int last_good_cn = -1;            // contig of last hit scoring above threshold
  unsigned int last_good_g_off = 0; // init not needed
  f1_hash_tag++;                    // new tag for the f1 filter's hash cache
  for (i = 0; i < re->n_hits[st]; i++) {
    if (options->only_paired && re->hits[st][i].pair_min < 0) {
      continue;
    }
    if (re->hits[st][i].matches < options->min_matches) {
      continue;
    }
    // if this hit is saved, leave it be, but update last_good
    if (re->hits[st][i].saved == 1) {
      last_good_cn = re->hits[st][i].cn;
      last_good_g_off = re->hits[st][i].g_off_pos_strand;
      continue;
    }
    // check window overlap: skip windows that mostly repeat the last good one
    if (last_good_cn >= 0
        && re->hits[st][i].cn == last_good_cn
        && re->hits[st][i].g_off_pos_strand + (unsigned int)abs_or_pct(options->window_overlap, re->window_len) <= last_good_g_off + re->window_len) {
      re->hits[st][i].score_vector = 0;
      re->hits[st][i].pct_score_vector = 0;
      continue;
    }
    if (re->hits[st][i].score_vector <= 0) {
      if (shrimp_mode == MODE_COLOUR_SPACE)
      {
        uint32_t ** gen_cs;
        uint32_t ** gen_ls;
        struct read_hit * rh = &re->hits[st][i];
        // colour space: the hit must be scored against the read's input
        // strand; reverse the hit (not the read) if they disagree
        if (rh->st != re->input_strand)
          reverse_hit(re, &re->hits[st][i]);
        if (re->hits[st][i].gen_st == 0) {
          gen_cs = genome_cs_contigs;
          gen_ls = genome_contigs;
        } else {
          gen_cs = genome_cs_contigs_rc;
          gen_ls = genome_contigs_rc;
        }
        re->hits[st][i].score_vector = f1_run(gen_cs[re->hits[st][i].cn], genome_len[re->hits[st][i].cn],
                                              re->hits[st][i].g_off, re->hits[st][i].w_len,
                                              re->read[rh->st], re->read_len,
                                              re->hits[st][i].g_off + re->hits[st][i].anchor.x, re->hits[st][i].anchor.y,
                                              gen_ls[re->hits[st][i].cn], re->initbp[st], genome_is_rna, f1_hash_tag,
                                              options->gapless);
      }
      else
      {
        // letter space: score directly against the forward contigs
        re->hits[st][i].score_vector = f1_run(genome_contigs[re->hits[st][i].cn], genome_len[re->hits[st][i].cn],
                                              re->hits[st][i].g_off, re->hits[st][i].w_len,
                                              re->read[st], re->read_len,
                                              re->hits[st][i].g_off + re->hits[st][i].anchor.x, re->hits[st][i].anchor.y,
                                              NULL, -1, genome_is_rna, f1_hash_tag,
                                              options->gapless);
      }
      // score as permille-of-percent of the maximum achievable score
      re->hits[st][i].pct_score_vector = (1000 * 100 * re->hits[st][i].score_vector)/re->hits[st][i].score_max;
      if (re->hits[st][i].score_vector >= (int)abs_or_pct(options->threshold, re->hits[st][i].score_max)) {
        last_good_cn = re->hits[st][i].cn;
        last_good_g_off = re->hits[st][i].g_off_pos_strand;
      }
    }
  }
}
/*
 * Go through hit list, apply vector filter, and save top scores.
 * Runs pass1 on both strands under the pass1 time counter; optionally dumps
 * the resulting hit lists when compiled with DEBUG_HIT_LIST_PASS1.
 */
static inline void
read_pass1(struct read_entry * re, struct pass1_options * options)
{
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.pass1_tc);
  read_pass1_per_strand(re, 0, options);
  read_pass1_per_strand(re, 1, options);
  //after = rdtsc();
  //tpg.pass1_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.pass1_tc);
#ifdef DEBUG_HIT_LIST_PASS1
  // serialize debug output across threads
#pragma omp critical (cs_stderr)
  {
    fprintf(stderr, "Dumping hit list after pass1 for read:[%s]\n", re->name);
    dump_hit_list(re, 0, options->only_paired, false);
    dump_hit_list(re, 1, options->only_paired, false);
  }
#endif
}
#define EXTHEAP_unpaired_pass1_CMP(a, b) ((a)->pass1_key < (b)->pass1_key)
DEF_EXTHEAP(struct read_hit *,unpaired_pass1)
/*
 * Go through the list adding hits to the heap.
 *
 * Collect the top pass1 hits (by score_vector or its percentage form,
 * depending on whether the threshold is absolute) into the bounded min-heap
 * 'a' of capacity options->num_outputs; *load tracks the heap size.
 * Already-saved hits are skipped.  Used only in unpaired/half-paired mode.
 */
static void
read_get_vector_hits(struct read_entry * re, struct read_hit * * a, int * load, struct pass1_options * options)
{
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.get_vector_hits_tc);
  int st, i;
  assert(re != NULL && a != NULL && load != NULL && *load == 0);
  assert(pair_mode == PAIR_NONE || half_paired);
  for (st = 0; st < 2; st++) {
    assert(re->n_hits[st] == 0 || re->hits[st] != NULL);
    for (i = 0; i < re->n_hits[st]; i++) {
      if (re->hits[st][i].saved == 1) continue;
      // accept if above threshold AND either the heap has room or the hit
      // beats the current heap minimum (a[0])
      if (re->hits[st][i].score_vector >= (int)abs_or_pct(options->threshold, re->hits[st][i].score_max)
          && (*load < options->num_outputs
              || ( (IS_ABSOLUTE(options->threshold)
                    && re->hits[st][i].score_vector > a[0]->pass1_key)
                   || (!IS_ABSOLUTE(options->threshold)
                       && re->hits[st][i].pct_score_vector > a[0]->pass1_key)))) {
        //fprintf(stderr,"%d matches\n",re->hits[st][i].matches);
        re->hits[st][i].pass1_key = (IS_ABSOLUTE(options->threshold)? re->hits[st][i].score_vector : re->hits[st][i].pct_score_vector);
        //assert(re->hits[st][i].gen_st==0);
        if (*load < options->num_outputs)
          extheap_unpaired_pass1_insert(a, load, &re->hits[st][i]);
        else
          extheap_unpaired_pass1_replace_min(a, load, &re->hits[st][i]);
      }
    }
  }
  //after = rdtsc();
  //tpg.get_vector_hits_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.get_vector_hits_tc);
}
/*
// don't use this for qsort directly;
static inline int
read_hit_purealign_cmp(struct read_hit * rh1, struct read_hit * rh2)
{
if (rh1->cn < rh2->cn) {
return -1;
} else if (rh1->cn == rh2->cn) {
if (rh1->st < rh2->st) {
return -1;
} else if (rh1->st == rh2->st) {
return rh1->g_off - rh2->g_off;
}
}
return 1;
}
// don't use this for qsort directly;
static inline int
read_hit_overlap_cmp(struct read_hit * rh1, struct read_hit * rh2)
{
if (rh1->cn < rh2->cn) {
return -1;
} else if (rh1->cn == rh2->cn) {
if (rh1->st < rh2->st) {
return -1;
} else if (rh1->st == rh2->st) {
if (rh1->g_off + rh1->w_len < rh2->g_off) {
return -1;
} else if (rh2->g_off + rh2->w_len < rh1->g_off) {
return 1;
} else {
return 0;
}
}
}
return 1;
}
// sort by: contig number; then strand; then g_off
static int
pass2_read_hit_align_cmp(void const * e1, void const * e2)
{
struct read_hit * rh1 = *(struct read_hit * *)e1;
struct read_hit * rh2 = *(struct read_hit * *)e2;
return read_hit_purealign_cmp(rh1, rh2);
}
// sort by: contig number; then strand; then g_off; but return 0 if overlapping
static int
pass2_read_hit_overlap_cmp(void const * e1, void const * e2)
{
struct read_hit * rh1 = *(struct read_hit * *)e1;
struct read_hit * rh2 = *(struct read_hit * *)e2;
return read_hit_overlap_cmp(rh1, rh2);
}
*/
// sort by score
// qsort comparator ordering read_hit pointers by non-increasing pass2_key.
// Uses explicit comparisons instead of returning (k2 - k1): the subtraction
// form invokes signed-integer overflow (undefined behavior) when the two
// keys are far apart, e.g. one near INT_MIN and the other positive.
static int
pass2_read_hit_score_cmp(void const * e1, void const * e2) {
  int k1 = (*(struct read_hit * *)e1)->pass2_key;
  int k2 = (*(struct read_hit * *)e2)->pass2_key;
  return (k2 > k1) - (k2 < k1);
}
// Compare two hits by (contig, genome strand, alignment start position),
// using the genome_start recorded in each hit's full-alignment result
// (sfrp).  Both hits must have a non-NULL sfrp.
static inline int
pass2_read_hit_sfrp_gen_start_cmp_base(struct read_hit const * rh1, struct read_hit const * rh2) {
  if (rh1->cn != rh2->cn)
    return rh1->cn - rh2->cn;
  if (rh1->gen_st != rh2->gen_st)
    return rh1->gen_st - rh2->gen_st;
  return rh1->sfrp->genome_start - rh2->sfrp->genome_start;
}
// Compare two hits by (contig, genome strand, alignment END position).
// The end coordinate is derived from the sfrp as
// genome_start + rmapped - deletions + insertions; both sides are negated
// so equal expressions compare equal and larger ends sort consistently.
static inline int
pass2_read_hit_sfrp_gen_end_cmp_base(struct read_hit const * rh1, struct read_hit const * rh2) {
  if (rh1->cn != rh2->cn)
    return rh1->cn - rh2->cn;
  if (rh1->gen_st != rh2->gen_st)
    return rh1->gen_st - rh2->gen_st;
  return (- rh1->sfrp->genome_start - rh1->sfrp->rmapped + rh1->sfrp->deletions - rh1->sfrp->insertions)
    - (- rh2->sfrp->genome_start - rh2->sfrp->rmapped + rh2->sfrp->deletions - rh2->sfrp->insertions);
}
// qsort adapter over read_hit pointers: order by alignment start.
static int
pass2_read_hit_sfrp_gen_start_cmp(void const * e1, void const * e2) {
  struct read_hit * const * p1 = (struct read_hit * const *)e1;
  struct read_hit * const * p2 = (struct read_hit * const *)e2;
  return pass2_read_hit_sfrp_gen_start_cmp_base(*p1, *p2);
}
// qsort adapter over read_hit pointers: order by alignment end.
static int
pass2_read_hit_sfrp_gen_end_cmp(void const * e1, void const * e2) {
  struct read_hit * const * p1 = (struct read_hit * const *)e1;
  struct read_hit * const * p2 = (struct read_hit * const *)e2;
  return pass2_read_hit_sfrp_gen_end_cmp_base(*p1, *p2);
}
// Helper for read_remove_duplicate_hits: assuming hits_pass2[0..n_hits) is
// sorted by cmp, keep from each run of cmp-equal hits only the one with the
// maximum pass2_key, compacting survivors to the front of the array.
// Returns the new number of hits.  (Factored out of two previously
// duplicated, byte-identical scan loops.)
static int
read_remove_duplicate_hits_one_pass(struct read_hit * * hits_pass2, int n_hits,
                                    int (*cmp)(void const *, void const *))
{
  int i = 0, k = 0;
  while (i < n_hits) {
    // find the best-scoring hit in the group starting at i
    int max = hits_pass2[i]->pass2_key;
    int max_idx = i;
    int j = i + 1;
    while (j < n_hits && !cmp(&hits_pass2[i], &hits_pass2[j])) {
      if (hits_pass2[j]->pass2_key > max) {
        max = hits_pass2[j]->pass2_key;
        max_idx = j;
      }
      j++;
    }
    // keep only the group's best hit, compacted to slot k
    if (max_idx != k) {
      hits_pass2[k] = hits_pass2[max_idx];
    }
    k++;
    i = j;
  }
  return k;
}

// remove duplicate hits
//
// Two passes: first collapse hits whose full alignments start at the same
// (contig, strand, genome position), then collapse hits whose alignments
// END at the same position.  Each pass keeps the highest-pass2_key hit of
// every duplicate group and counts the removed hits in the global
// total_dup_single_matches statistic.  *n_hits_pass2 is updated in place.
static void
read_remove_duplicate_hits(struct read_hit * * hits_pass2, int * n_hits_pass2)
{
  int k;
  //llint before = gettimeinusecs();
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.duplicate_removal_tc);
  // pass 1: duplicates by alignment start
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_read_hit_sfrp_gen_start_cmp);
  k = read_remove_duplicate_hits_one_pass(hits_pass2, *n_hits_pass2, pass2_read_hit_sfrp_gen_start_cmp);
#pragma omp atomic
  total_dup_single_matches += (*n_hits_pass2) - k;
  *n_hits_pass2 = k;
  // pass 2: duplicates by alignment end
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_read_hit_sfrp_gen_end_cmp);
  k = read_remove_duplicate_hits_one_pass(hits_pass2, *n_hits_pass2, pass2_read_hit_sfrp_gen_end_cmp);
#pragma omp atomic
  total_dup_single_matches += (*n_hits_pass2) - k;
  *n_hits_pass2 = k;
  //duplicate_removal_usecs[omp_get_thread_num()] += gettimeinusecs() - before;
  //after = rdtsc();
  //tpg.duplicate_removal_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.duplicate_removal_tc);
}
// Compute the posterior probability and posterior-based score for a hit's
// full alignment, storing them in the hit's sfrp and mirroring them into
// rh->score_full / rh->pct_score_full.
// In colour space the posterior comes from post_sw(); in letter space the
// SW score is reused directly to derive a posterior via the score model
// (score_alpha/score_beta).  The posterior score is clamped at 0.
static void
hit_run_post_sw(struct read_entry * re, struct read_hit * rh)
{
  //fprintf(stderr, "running post sw for read: [%s]\n", re->name);
  if (shrimp_mode == MODE_COLOUR_SPACE) {
    post_sw(re->read[rh->st], re->initbp[rh->st], re->qual, rh->sfrp);
  } else { // LS: cheat; reuse SW score to get posterior
    rh->sfrp->posterior = pow(2.0, ((double)rh->sfrp->score - (double)rh->sfrp->rmapped * (2.0 * score_alpha + score_beta))/score_alpha);
  }
  // convert the posterior back into score units (inverse of the LS formula)
  rh->sfrp->posterior_score = (int)rint(score_alpha * log(rh->sfrp->posterior) / log(2.0) + (double)rh->sfrp->rmapped * (2.0 * score_alpha + score_beta));
  if (rh->sfrp->posterior_score < 0)
    rh->sfrp->posterior_score = 0;
  rh->sfrp->pct_posterior_score = (1000 * 100 * rh->sfrp->posterior_score)/rh->score_max;
  rh->score_full = rh->sfrp->posterior_score;
  rh->pct_score_full = rh->sfrp->pct_posterior_score;
}
/*
 * Do a final pass for given read.
 *
 * Compute full Smith-Waterman scores for the pass1 candidates, keep those
 * scoring above threshold in hits_pass2, remove duplicates, sort by
 * non-increasing score, then apply the num_outputs / strata /
 * max_alignments policies.  Surviving hits are marked saved and their sfrp
 * structs pinned (in_use) so the caller does not free them.
 *
 * Returns true when the stop condition is met (stop_count == 0, or at
 * least stop_count hits score above stop_threshold), telling the caller it
 * can stop trying further option sets.
 *
 * BUG FIX: the early return for stop_count == 0 previously skipped
 * TIME_COUNTER_STOP, leaving the pass2 time counter running; the counter
 * is now stopped on every exit path, matching all other timed functions.
 */
static bool
read_pass2(struct read_entry * re,
           struct read_hit * * hits_pass1, int n_hits_pass1,
           struct read_hit * * hits_pass2, int * n_hits_pass2,
           struct pass2_options * options)
{
  TIME_COUNTER_START(tpg.pass2_tc);
  int i, cnt;
  assert(re != NULL && hits_pass1 != NULL && hits_pass2 != NULL);
  /* compute full alignment scores */
  for (i = 0; i < n_hits_pass1; i++) {
    struct read_hit * rh = hits_pass1[i];
    if (rh->score_full < 0 || rh->sfrp == NULL) {
      hit_run_full_sw(re, rh, (int)abs_or_pct(options->threshold, rh->score_max));
      if (compute_mapping_qualities && rh->score_full > 0) {
        hit_run_post_sw(re, rh);
      }
      // key used for duplicate removal and final sorting
      rh->pass2_key = (IS_ABSOLUTE(options->threshold)? rh->score_full : (int)rh->pct_score_full);
    }
    if (rh->score_full >= abs_or_pct(options->threshold, rh->score_max)) {
      hits_pass2[*n_hits_pass2] = rh;
      (*n_hits_pass2)++;
    }
  }
#ifdef DEBUG_HIT_LIST_PASS2
  fprintf(stderr, "Dumping hit list after pass2 (before duplicates removal and sorting) for read:[%s]\n", re->name);
  for (i = 0; i < n_hits_pass1; i++) {
    dump_hit(hits_pass1[i]);
  }
#endif
  // remove duplicate hits
  read_remove_duplicate_hits(hits_pass2, n_hits_pass2);
  // sort by non-increasing score
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_read_hit_score_cmp);
  // trim excess mappings
  if (*n_hits_pass2 > options->num_outputs)
    *n_hits_pass2 = options->num_outputs;
  // if strata is set, keep only top scoring hits
  if (options->strata && *n_hits_pass2 > 0) {
    for (i = 1; i < *n_hits_pass2 && hits_pass2[0]->score_full == hits_pass2[i]->score_full; i++);
    *n_hits_pass2 = i;
  }
  // drop reads with too many mappings
  if (*n_hits_pass2 > 0) {
    if (max_alignments == 0 || *n_hits_pass2 <= max_alignments) {
#pragma omp atomic
      total_reads_matched++;
    } else {
#pragma omp atomic
      total_reads_dropped++;
      *n_hits_pass2 = 0;
    }
  }
  // mark remaining hits as saved; pin their sfrp structs
  for (i = 0; i < *n_hits_pass2; i++) {
    hits_pass2[i]->saved = 1;
    hits_pass2[i]->sfrp->in_use = true;
  }
  // update counts
  re->final_matches += *n_hits_pass2;
#pragma omp atomic
  total_single_matches += re->final_matches;
  // check stop condition
  if (options->stop_count == 0) {
    TIME_COUNTER_STOP(tpg.pass2_tc); // fixed: stop the counter on this path too
    return true;
  }
  for (i = 0, cnt = 0; i < *n_hits_pass2; i++) {
    if (hits_pass2[i]->score_full >= (int)abs_or_pct(options->stop_threshold, hits_pass2[i]->score_max)) {
      cnt++;
    }
  }
  TIME_COUNTER_STOP(tpg.pass2_tc);
  return cnt >= options->stop_count;
}
/*
 * Append the pass2 hits (by value) to the read's final_unpaired_hits array,
 * transferring ownership of each hit's sfrp struct: the source hit's sfrp
 * pointer is nulled so the struct is not freed while still referenced.
 */
static void
read_save_final_hits(read_entry * re, read_hit * * hits_pass2, int n_hits_pass2)
{
  int k;
  int old_count = re->n_final_unpaired_hits;
  // grow the destination array to hold the new hits
  re->final_unpaired_hits = (read_hit *)
    my_realloc(re->final_unpaired_hits,
               (old_count + n_hits_pass2) * sizeof(re->final_unpaired_hits[0]),
               old_count * sizeof(re->final_unpaired_hits[0]),
               &mem_mapping, "final_unpaired_hits [%s]", re->name);
  for (k = 0; k < n_hits_pass2; k++) {
    re->final_unpaired_hits[old_count + k] = *hits_pass2[k];
    // erase sfrp structs to prevent them from being freed too early
    hits_pass2[k]->sfrp = NULL;
  }
  re->n_final_unpaired_hits = old_count + n_hits_pass2;
}
/*
 * Map a single unpaired read.
 *
 * Runs the full pipeline — region counts, anchor list, hit list, pass1
 * (vector filter), pass2 (full SW) — once per option set, stopping early
 * when pass2 reports its stop condition met.  Hits surviving pass2 are
 * either saved on the read (save_outputs) or emitted immediately via
 * read_output.  Afterwards the read is optionally written to the
 * aligned/unaligned FASTA files (unpaired mode only).
 */
void
handle_read(struct read_entry * re, struct read_mapping_options_t * options, int n_options)
{
  bool done;
  int option_index = 0;
  int i;
  struct read_hit * * hits_pass1 = NULL;
  struct read_hit * * hits_pass2 = NULL;
  int n_hits_pass1;
  int n_hits_pass2;
  llint before = gettimeinusecs();
  if (re->mapidx[0] == NULL) {
    read_get_mapidxs(re);
  }
  do {
    // recompute the requested pipeline stages for this option set
    if (options[option_index].regions.recompute) {
      // advance the (wrapping) region map generation id
      region_map_id++;
      region_map_id &= ((1 << region_map_id_bits) - 1);
      read_get_region_counts(re, 0, &options[option_index].regions);
      read_get_region_counts(re, 1, &options[option_index].regions);
    }
    if (options[option_index].anchor_list.recompute) {
      read_free_anchor_list(re, &mem_mapping);
      read_get_anchor_list(re, &options[option_index].anchor_list);
    }
    if (options[option_index].hit_list.recompute) {
      read_free_hit_list(re, &mem_mapping);
      read_get_hit_list(re, &options[option_index].hit_list);
    }
    if (options[option_index].pass1.recompute) {
      read_pass1(re, &options[option_index].pass1);
    }
    // pass1: select up to num_outputs top vector-filter hits
    hits_pass1 = (struct read_hit * *)
      my_malloc(options[option_index].pass1.num_outputs * sizeof(hits_pass1[0]), &mem_mapping, "hits_pass1 [%s]", re->name);
    n_hits_pass1 = 0;
    read_get_vector_hits(re, hits_pass1, &n_hits_pass1, &options[option_index].pass1);
    // pass2: full SW on the pass1 candidates
    hits_pass2 = (struct read_hit * *)
      my_malloc(options[option_index].pass1.num_outputs * sizeof(hits_pass2[0]), &mem_mapping, "hits_pass2 [%s]", re->name);
    n_hits_pass2 = 0;
    done = read_pass2(re, hits_pass1, n_hits_pass1, hits_pass2, &n_hits_pass2, &options[option_index].pass2);
    if (n_hits_pass2 > 0) {
      if (options[option_index].pass2.save_outputs)
        read_save_final_hits(re, hits_pass2, n_hits_pass2);
      else {
        read_output(re, hits_pass2, n_hits_pass2);
        // unpin sfrp structs now that output is done
        for (i = 0; i < n_hits_pass2; i++)
          hits_pass2[i]->sfrp->in_use = false;
      }
      re->mapped = true;
    }
    // free pass1 structs (only sfrp's not pinned by pass2)
    for (i = 0; i < n_hits_pass1; i++)
      if (hits_pass1[i]->sfrp != NULL && !hits_pass1[i]->sfrp->in_use)
        free_sfrp(&hits_pass1[i]->sfrp, re, &mem_mapping);
    my_free(hits_pass1, options[option_index].pass1.num_outputs * sizeof(hits_pass1[0]), &mem_mapping, "hits_pass1 [%s]", re->name);
    my_free(hits_pass2, options[option_index].pass1.num_outputs * sizeof(hits_pass2[0]), &mem_mapping, "hits_pass2 [%s]", re->name);
  } while (!done && ++option_index < n_options);
  //if (options_index >= n_options) {
  // this read fell through all the option sets
  //}
  tpg.read_handle_usecs += gettimeinusecs() - before;
  if (pair_mode == PAIR_NONE) {
    // write the read to the aligned/unaligned output files, if requested
    if (aligned_reads_file != NULL && re->mapped) {
#pragma omp critical (aligned_reads_file)
      {
        fasta_write_read(aligned_reads_file, re);
      }
    }
    if ((unaligned_reads_file != NULL || sam_unaligned) && !re->mapped) {
#pragma omp critical (unaligned_reads_file)
      {
        if (unaligned_reads_file != NULL) {
          fasta_write_read(unaligned_reads_file, re);
        }
      }
      if (sam_unaligned) {
        hit_output(re, NULL, NULL, false, NULL, 0);
      }
    }
  }
}
#define EXTHEAP_paired_pass1_CMP(a, b) ((a).key < (b).key)
DEF_EXTHEAP(struct read_hit_pair, paired_pass1)
/*
 * Go through the hit lists, constructing paired hits.
 *
 * For every hit of re1 with a pairing range (pair_min..pair_max) into the
 * opposite-strand hit list of re2, build a read_hit_pair whose combined
 * vector score passes pass1_threshold, and keep the top pass1_num_outputs
 * pairs in the bounded min-heap 'a' (*load is the heap size).
 */
static void
readpair_get_vector_hits(struct read_entry * re1, struct read_entry * re2,
                         struct read_hit_pair * a, int * load,
                         struct pairing_options * options)
{
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.get_vector_hits_tc);
  int st1, st2, i, j;
  read_hit_pair tmp;
  assert(re1 != NULL && re2 != NULL && a != NULL);
  for (st1 = 0; st1 < 2; st1++) {
    st2 = 1 - st1; // opposite strand
    for (i = 0; i < re1->n_hits[st1]; i++) {
      if (re1->hits[st1][i].saved == 1) continue;
      if (re1->hits[st1][i].pair_min < 0)
        continue;
      // iterate over re2 hits this hit can pair with
      for (j = re1->hits[st1][i].pair_min; j <= re1->hits[st1][i].pair_max; j++) {
        if (re2->hits[st2][j].saved == 1) continue;
        //if (re1->hits[st1][i].matches + re2->hits[st2][j].matches < options->min_num_matches)
        //  continue;
        // combined vector score of the pair (and permille-of-percent form)
        tmp.score = re1->hits[st1][i].score_vector + re2->hits[st2][j].score_vector;
        tmp.score_max = re1->hits[st1][i].score_max + re2->hits[st2][j].score_max;
        tmp.pct_score = (1000 * 100 * tmp.score)/tmp.score_max;
        tmp.key = (IS_ABSOLUTE(options->pass1_threshold)? tmp.score : tmp.pct_score);
        tmp.improper_mapping = false;
        tmp.rh_idx[0] = -1;
        tmp.rh_idx[1] = -1;
        // accept if above threshold AND heap has room or pair beats heap min
        if (tmp.score >= (int)abs_or_pct(options->pass1_threshold, tmp.score_max)
            && (*load < options->pass1_num_outputs || tmp.key > a[0].key)) {
          tmp.rh[0] = &re1->hits[st1][i];
          tmp.rh[1] = &re2->hits[st2][j];
          //TODO HISTOGRAM IS OFF! NOT SAM
          // gap between the two windows, signed by which mate comes first
          tmp.insert_size = (int)(st1 == 0?
                                  re2->hits[st2][j].g_off - (re1->hits[st1][i].g_off + re1->hits[st1][i].w_len) :
                                  re1->hits[st1][i].g_off - (re2->hits[st2][j].g_off + re2->hits[st2][j].w_len));
          if (*load < options->pass1_num_outputs)
            extheap_paired_pass1_insert(a, load, tmp);
          else
            extheap_paired_pass1_replace_min(a, load, tmp);
        }
      }
    }
  }
  //after = rdtsc();
  //tpg.get_vector_hits_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.get_vector_hits_tc);
}
/*
// sort by: contig number; then strand; then g_off of hit[0]
static int
pass2_readpair_hit0_align_cmp(void const * e1, void const * e2) {
struct read_hit_pair * rhp1 = (struct read_hit_pair *)e1;
struct read_hit_pair * rhp2 = (struct read_hit_pair *)e2;
return read_hit_purealign_cmp(rhp1->rh[0], rhp2->rh[0]);
}
// sort by: contig number; then strand; then g_off of hit[1]
static int
pass2_readpair_hit1_align_cmp(void const * e1, void const * e2) {
struct read_hit_pair * rhp1 = (struct read_hit_pair *)e1;
struct read_hit_pair * rhp2 = (struct read_hit_pair *)e2;
return read_hit_purealign_cmp(rhp1->rh[1], rhp2->rh[1]);
}
// sort by: contig number; then strand; then g_off of hit[0]; but return 0 if hit[0] overlapping
static int
pass2_readpair_hit0_overlap_cmp(void const * e1, void const * e2) {
struct read_hit_pair * rhp1 = (struct read_hit_pair *)e1;
struct read_hit_pair * rhp2 = (struct read_hit_pair *)e2;
return read_hit_overlap_cmp(rhp1->rh[0], rhp2->rh[0]);
}
// sort by: contig number; then strand; then g_off of hit[1]; but return 0 if hit[1] overlapping
static int
pass2_readpair_hit1_overlap_cmp(void const * e1, void const * e2) {
struct read_hit_pair * rhp1 = (struct read_hit_pair *)e1;
struct read_hit_pair * rhp2 = (struct read_hit_pair *)e2;
return read_hit_overlap_cmp(rhp1->rh[1], rhp2->rh[1]);
}
*/
// sort by score
// qsort comparator ordering read_hit_pairs by non-increasing key.
// Uses explicit comparisons instead of returning (k2 - k1): the subtraction
// form invokes signed-integer overflow (undefined behavior) when the two
// keys are far apart.
static int
pass2_read_hit_pair_score_cmp(void const * e1, void const * e2)
{
  int k1 = ((struct read_hit_pair *)e1)->key;
  int k2 = ((struct read_hit_pair *)e2)->key;
  return (k2 > k1) - (k2 < k1);
}
static int
pass2_readpair_hit0_sfrp_gen_start_cmp(void const * e1, void const * e2)
{
return pass2_read_hit_sfrp_gen_start_cmp_base(((read_hit_pair *)e1)->rh[0], ((read_hit_pair *)e2)->rh[0]);
}
static int
pass2_readpair_hit1_sfrp_gen_start_cmp(void const * e1, void const * e2)
{
return pass2_read_hit_sfrp_gen_start_cmp_base(((read_hit_pair *)e1)->rh[1], ((read_hit_pair *)e2)->rh[1]);
}
static int
pass2_readpair_hit0_sfrp_gen_end_cmp(void const * e1, void const * e2)
{
return pass2_read_hit_sfrp_gen_end_cmp_base(((read_hit_pair *)e1)->rh[0], ((read_hit_pair *)e2)->rh[0]);
}
static int
pass2_readpair_hit1_sfrp_gen_end_cmp(void const * e1, void const * e2)
{
return pass2_read_hit_sfrp_gen_end_cmp_base(((read_hit_pair *)e1)->rh[1], ((read_hit_pair *)e2)->rh[1]);
}
// qsort comparator yielding a total order on read_hit_pairs by the sort_idx
// of their constituent hits: compare hit[0] first (NULL sorts before
// non-NULL), then fall back to hit[1].  Used to group identical pairs for
// duplicate removal.
//
// NOTE: the original ended with a raw pointer-value comparison of rh[0]
// and rh[1]; that code was unreachable (every path above it returns) and
// has been removed.  Behavior is unchanged.
static int
pass2_readpair_pointer_cmp(void const * e1, void const * e2)
{
  struct read_hit_pair * rhpp1 = (struct read_hit_pair *)e1;
  struct read_hit_pair * rhpp2 = (struct read_hit_pair *)e2;
  // exactly one side missing hit[0]: the NULL one sorts first
  if ((rhpp1->rh[0] == NULL) != (rhpp2->rh[0] == NULL)) {
    return rhpp1->rh[0] == NULL? -1 : 1;
  }
  // both hit[0] present with distinct sort indexes: order by them
  if (rhpp1->rh[0] != NULL && rhpp2->rh[0] != NULL
      && rhpp1->rh[0]->sort_idx != rhpp2->rh[0]->sort_idx) {
    return rhpp1->rh[0]->sort_idx - rhpp2->rh[0]->sort_idx;
  }
  // tie on hit[0] (or both NULL): break by hit[1], NULL first
  if ((rhpp1->rh[1] == NULL) != (rhpp2->rh[1] == NULL)) {
    return rhpp1->rh[1] == NULL? -1 : 1;
  }
  if (rhpp1->rh[1] != NULL) {
    return rhpp1->rh[1]->sort_idx - rhpp2->rh[1]->sort_idx;
  }
  return 0;
}
// Fill in *dest as the pairing of rh1 and rh2: combined full-SW score,
// its permille-of-percent form, the sort key (absolute score or percentage,
// per threshold_is_absolute), and a signed insert size whose sign depends
// on the pair mode and rh1's genome strand.
static inline void
readpair_compute_paired_hit(struct read_hit * rh1, struct read_hit * rh2, bool threshold_is_absolute,
                            struct read_hit_pair * dest)
{
  dest->rh[0] = rh1;
  dest->rh[1] = rh2;
  dest->score_max = rh1->score_max + rh2->score_max;
  dest->score = rh1->score_full + rh2->score_full;
  dest->pct_score = (1000 * 100 * dest->score)/dest->score_max;
  dest->key = threshold_is_absolute? dest->score : dest->pct_score;
  //dest->insert_size = abs(get_insert_size(rh1, rh2));
  int ins_sz = get_insert_size(rh1, rh2);
  int sign;
  // orientation convention: which strand of rh1 counts as "forward" flips
  // between inward-facing and outward-facing pair modes
  if (pair_mode == PAIR_OPP_IN || pair_mode == PAIR_COL_FW) {
    if (rh1->gen_st == 0)
      sign = +1;
    else
      sign = -1;
  } else { // PAIR_OPP_OUT, PAIR_COL_BW
    if (rh1->gen_st == 1)
      sign = +1;
    else
      sign = -1;
  }
  dest->insert_size = sign * ins_sz;
  dest->improper_mapping = false;
  //tmp.isize_score=expected_isize==-1 ? 0 : abs(tmp.isize-expected_isize);
}
// Within each group of pairs whose hit[num_in_pair] compares equal under
// cmp (after sorting by cmp), replace every group member's
// hit[num_in_pair] with the group's best-scoring one (by score_full) and
// recompute the pair's combined score/key.  This makes duplicate pairs
// byte-comparable so the pointer-based dedup that follows can remove them.
static void
readpair_push_dominant_single_hits(struct read_hit_pair * hits_pass2, int * n_hits_pass2, bool threshold_is_absolute,
                                   int num_in_pair, int (*cmp)(void const *, void const *))
{
  int i, j, k, max, max_idx;
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), cmp);
  i = 0;
  while (i < *n_hits_pass2) {
    // find the dominant (highest score_full) hit in the group starting at i
    max = hits_pass2[i].rh[num_in_pair]->score_full;
    max_idx = i;
    j = i + 1;
    while (j < *n_hits_pass2 && !cmp((void *)&hits_pass2[i], (void *)&hits_pass2[j])) {
      if (hits_pass2[j].rh[num_in_pair]->score_full > max) {
        max = hits_pass2[j].rh[num_in_pair]->score_full;
        max_idx = j;
      }
      j++;
    }
    // propagate the dominant hit to all other members of the group
    for (k = i; k < j; k++) {
      if (k != max_idx) {
        hits_pass2[k].rh[num_in_pair] = hits_pass2[max_idx].rh[num_in_pair];
        readpair_compute_paired_hit(hits_pass2[k].rh[0], hits_pass2[k].rh[1], threshold_is_absolute, &hits_pass2[k]);
      }
    }
    i = j;
  }
}
// remove duplicate hits
//
// For each of the four (mate, start/end) combinations, push the dominant
// single hit across groups of overlapping alignments, then sort pairs by
// their hits' sort indexes and remove exact duplicates with removedups().
// The number of removed pairs is added to total_dup_paired_matches and
// *n_hits_pass2 is updated in place.
static void
readpair_remove_duplicate_hits(struct read_hit_pair * hits_pass2, int * n_hits_pass2, bool threshold_is_absolute)
{
  /*
  int i, j, k, l, max, max_idx;
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_readpair_hit0_align_cmp);
  i = 0;
  k = 0;
  while (i < *n_hits_pass2) {
    j = i + 1;
    while (j < *n_hits_pass2 && !pass2_readpair_hit0_overlap_cmp(&hits_pass2[j-1], &hits_pass2[j])) {
      j++;
    }
    if (j > i + 1) {
      qsort(&hits_pass2[i], j - i, sizeof(hits_pass2[0]), pass2_readpair_hit1_align_cmp);
    }
    while (i < j) {
      max = hits_pass2[i].key;
      max_idx = i;
      l = i + 1;
      while (l < j && !pass2_readpair_hit1_overlap_cmp(&hits_pass2[max_idx], &hits_pass2[l])) {
        if (hits_pass2[l].key > max) {
          max = hits_pass2[l].key;
          max_idx = l;
        }
        l++;
      }
      if (max_idx != k) {
        hits_pass2[k] = hits_pass2[max_idx];
      }
      k++;
      i = l;
    }
  }
  return k;
  */
  int tmp;
  //llint before = gettimeinusecs();
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.duplicate_removal_tc);
  // canonicalize each pair: substitute the dominant hit for each mate,
  // grouped first by alignment start, then by alignment end
  readpair_push_dominant_single_hits(hits_pass2, n_hits_pass2, threshold_is_absolute, 0, pass2_readpair_hit0_sfrp_gen_start_cmp);
  readpair_push_dominant_single_hits(hits_pass2, n_hits_pass2, threshold_is_absolute, 0, pass2_readpair_hit0_sfrp_gen_end_cmp);
  readpair_push_dominant_single_hits(hits_pass2, n_hits_pass2, threshold_is_absolute, 1, pass2_readpair_hit1_sfrp_gen_start_cmp);
  readpair_push_dominant_single_hits(hits_pass2, n_hits_pass2, threshold_is_absolute, 1, pass2_readpair_hit1_sfrp_gen_end_cmp);
  // now identical pairs compare equal; sort and drop duplicates
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_readpair_pointer_cmp);
  tmp = removedups(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_readpair_pointer_cmp);
#pragma omp atomic
  total_dup_paired_matches += (*n_hits_pass2) - tmp;
  *n_hits_pass2 = tmp;
  //duplicate_removal_usecs[omp_get_thread_num()] += gettimeinusecs() - before;
  //after = rdtsc();
  //tpg.duplicate_removal_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.duplicate_removal_tc);
}
/*
 * Pass 2 for a read pair: run full Smith-Waterman on the pass-1 candidate
 * pairs, keep the pairs whose combined score clears the pass-2 threshold,
 * deduplicate, sort by score, and trim the surviving mappings.
 *
 * Surviving pairs are appended to hits_pass2 and *n_hits_pass2 is updated
 * in place; the kept hits are marked saved / in_use.
 *
 * Returns true when the caller may stop trying further option sets: either
 * options->stop_count == 0, or at least stop_count of the kept pairs reach
 * the stop threshold.
 */
static bool
readpair_pass2(struct read_entry * re1, struct read_entry * re2,
	       struct read_hit_pair * hits_pass1, int n_hits_pass1,
	       struct read_hit_pair * hits_pass2, int * n_hits_pass2,
	       struct pairing_options * options,
	       struct pass2_options * options1, struct pass2_options * options2)
{
  TIME_COUNTER_START(tpg.pass2_tc);
  int i, j, cnt;
  /* compute full alignment scores for hits that do not have one yet
     (score_full < 0) or whose full-SW result struct is missing */
  for (i = 0; i < n_hits_pass1; i++) {
    for (j = 0; j < 2; j++) {
      struct read_hit * rh = hits_pass1[i].rh[j];
      struct read_entry * re = (j == 0? re1 : re2);
      double thres = (j == 0? options1->threshold : options2->threshold);
      if (rh->score_full < 0 || rh->sfrp == NULL) {
	hit_run_full_sw(re, rh, (int)abs_or_pct(thres, rh->score_max));
	if (compute_mapping_qualities && rh->score_full > 0) {
	  /* post-SW pass only needed when mapping qualities are requested */
	  hit_run_post_sw(re, rh);
	}
      }
    }
    /* drop the pair if either mate failed to align at all */
    if (hits_pass1[i].rh[0]->score_full == 0 || hits_pass1[i].rh[1]->score_full == 0) {
      continue;
    }
    /* keep the pair if the combined full score clears the pass-2 threshold */
    if (hits_pass1[i].rh[0]->score_full + hits_pass1[i].rh[1]->score_full
	>= (int)abs_or_pct(options->pass2_threshold, hits_pass1[i].score_max)) {
      readpair_compute_paired_hit(hits_pass1[i].rh[0], hits_pass1[i].rh[1], IS_ABSOLUTE(options->pass2_threshold),
				  &hits_pass2[*n_hits_pass2]);
      (*n_hits_pass2)++;
    }
  }
#ifdef DEBUG_HIT_LIST_PASS2
#pragma omp critical (cs_stderr)
  {
    fprintf(stderr, "Dumping paired hits after pass2 (before duplicates removal and sorting) for reads:[%s,%s]\n",
	    re1->name, re2->name);
    for (i = 0; i < n_hits_pass1; i++) {
      dump_hit(hits_pass1[i].rh[0]);
      dump_hit(hits_pass1[i].rh[1]);
    }
  }
#endif
  // remove duplicates
  readpair_remove_duplicate_hits(hits_pass2, n_hits_pass2, IS_ABSOLUTE(options->pass2_threshold));
#ifdef DEBUG_HIT_LIST_PASS2_AFTER
#pragma omp critical (cs_stderr)
  {
    fprintf(stderr, "Dumping paired hits after pass2 (after duplicates removal) for reads:[%s,%s]\n",
	    re1->name, re2->name);
    for (i = 0; i < *n_hits_pass2; i++) {
      dump_hit(hits_pass2[i].rh[0]);
      dump_hit(hits_pass2[i].rh[1]);
    }
  }
#endif
  // sort by score
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_read_hit_pair_score_cmp);
#ifdef DEBUG_HIT_LIST_PASS2_AFTER
#pragma omp critical (cs_stderr)
  {
    fprintf(stderr, "Dumping paired hits after pass2 (after duplicates removal and sorting) for reads:[%s,%s]\n",
	    re1->name, re2->name);
    for (i = 0; i < *n_hits_pass2; i++) {
      dump_hit(hits_pass2[i].rh[0]);
      dump_hit(hits_pass2[i].rh[1]);
    }
  }
#endif
  // trim excess mappings
  if (*n_hits_pass2 > options->pass2_num_outputs)
    *n_hits_pass2 = options->pass2_num_outputs;
  // if strata is set, keep only the top-scoring stratum (array is sorted)
  if (options->strata && *n_hits_pass2 > 0) {
    for (i = 1; i < *n_hits_pass2 && hits_pass2[0].score == hits_pass2[i].score; i++);
    *n_hits_pass2 = i;
  }
  // drop pairs with too many mappings (max_alignments == 0 disables the cap)
  if (*n_hits_pass2 > 0) {
    if (max_alignments == 0 || *n_hits_pass2 <= max_alignments) {
#pragma omp atomic
      total_pairs_matched++;
    } else {
#pragma omp atomic
      total_pairs_dropped++;
      *n_hits_pass2 = 0;
    }
  }
  // mark remaining hits as saved so their SW results are not freed
  for (i = 0; i < *n_hits_pass2; i++) {
    hits_pass2[i].rh[0]->saved = 1;
    hits_pass2[i].rh[0]->sfrp->in_use = true;
    hits_pass2[i].rh[1]->saved = 1;
    hits_pass2[i].rh[1]->sfrp->in_use = true;
  }
  // update counts
  // NOTE(review): this adds the cumulative re1->final_matches, not the
  // per-call delta; correct only if final_matches is 0 on entry -- confirm
  // against the callers.
  re1->final_matches += *n_hits_pass2;
#pragma omp atomic
  total_paired_matches += re1->final_matches;
  // check stop condition
  if (options->stop_count == 0)
    return true;
  for (i = 0, cnt = 0; i < *n_hits_pass2; i++) {
    if (hits_pass2[i].score >= (int)abs_or_pct(options->stop_threshold, hits_pass2[i].score_max)) {
      cnt++;
    }
  }
  TIME_COUNTER_STOP(tpg.pass2_tc);
  return cnt >= options->stop_count;
}
/*
 * Compute, for each mate and each index 0/1 (presumably forward/reverse
 * strand -- NOTE(review): inferred from usage, confirm), the allowed range
 * of genomic-offset deltas to its mate, derived from the configured insert
 * size range and the window/read lengths of the two reads. Also derive the
 * corresponding region-index ranges, where regions are 2^region_bits wide.
 *
 * For pair modes other than PAIR_OPP_IN, the ranges are written as the
 * PAIR_OPP_IN formulas plus read-length corrections; the commented-out
 * blocks show the equivalent closed forms.
 */
static void
readpair_compute_mp_ranges(struct read_entry * re1, struct read_entry * re2,
			   struct pairing_options * options)
{
  switch (pair_mode) {
  case PAIR_OPP_IN:
    re1->delta_g_off_min[0] = options->min_insert_size - re2->window_len;
    re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len) - re2->read_len;
    re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len + (re2->read_len - re2->window_len);
    re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len;
    /* re2's ranges are the negated, index-swapped ranges of re1 */
    re2->delta_g_off_min[0] = - re1->delta_g_off_max[1];
    re2->delta_g_off_max[0] = - re1->delta_g_off_min[1];
    re2->delta_g_off_min[1] = - re1->delta_g_off_max[0];
    re2->delta_g_off_max[1] = - re1->delta_g_off_min[0];
    break;
  case PAIR_OPP_OUT:
    /* equivalent closed form:
    re1->delta_g_off_min[0] = - options->max_insert_size - re2->window_len;
    re1->delta_g_off_max[0] = - options->min_insert_size + (re1->window_len - re1->read_len) - re2->read_len;
    re1->delta_g_off_min[1] = options->min_insert_size + re1->read_len + (re2->read_len - re2->window_len);
    re1->delta_g_off_max[1] = options->max_insert_size + re1->window_len;
    re2->delta_g_off_min[0] = - re1->delta_g_off_max[1];
    re2->delta_g_off_max[0] = - re1->delta_g_off_min[1];
    re2->delta_g_off_min[1] = - re1->delta_g_off_max[0];
    re2->delta_g_off_max[1] = - re1->delta_g_off_min[0];
    */
    /* OPP_IN formulas shifted by both read lengths */
    re1->delta_g_off_min[0] = options->min_insert_size - re2->window_len;
    re1->delta_g_off_min[0] += re1->read_len + re2->read_len;
    re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len) - re2->read_len;
    re1->delta_g_off_max[0] += re1->read_len + re2->read_len;
    re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len + (re2->read_len - re2->window_len);
    re1->delta_g_off_min[1] -= re1->read_len + re2->read_len;
    re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len;
    re1->delta_g_off_max[1] -= re1->read_len + re2->read_len;
    re2->delta_g_off_min[0] = - re1->delta_g_off_max[1];
    re2->delta_g_off_max[0] = - re1->delta_g_off_min[1];
    re2->delta_g_off_min[1] = - re1->delta_g_off_max[0];
    re2->delta_g_off_max[1] = - re1->delta_g_off_min[0];
    break;
  case PAIR_COL_FW:
    /* equivalent closed form:
    re1->delta_g_off_min[0] = options->min_insert_size + (re2->read_len - re2->window_len);
    re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len);
    re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len - re2->window_len;
    re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len - re2->read_len;
    re2->delta_g_off_min[0] = - re1->delta_g_off_max[0];
    re2->delta_g_off_max[0] = - re1->delta_g_off_min[0];
    re2->delta_g_off_min[1] = - re1->delta_g_off_max[1];
    re2->delta_g_off_max[1] = - re1->delta_g_off_min[1];
    */
    /* OPP_IN formulas shifted by re2's read length; note re2's ranges are
       negated but NOT index-swapped (same-strand mates) */
    re1->delta_g_off_min[0] = options->min_insert_size - re2->window_len;
    re1->delta_g_off_min[0] += re2->read_len;
    re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len) - re2->read_len;
    re1->delta_g_off_max[0] += re2->read_len;
    re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len + (re2->read_len - re2->window_len);
    re1->delta_g_off_min[1] -= re2->read_len;
    re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len;
    re1->delta_g_off_max[1] -= re2->read_len;
    re2->delta_g_off_min[0] = - re1->delta_g_off_max[0];
    re2->delta_g_off_max[0] = - re1->delta_g_off_min[0];
    re2->delta_g_off_min[1] = - re1->delta_g_off_max[1];
    re2->delta_g_off_max[1] = - re1->delta_g_off_min[1];
    break;
  case PAIR_COL_BW:
    /* equivalent closed form:
    re1->delta_g_off_min[0] = - options->max_insert_size + (re2->read_len - re2->window_len);
    re1->delta_g_off_max[0] = - options->min_insert_size + (re1->window_len - re1->read_len);
    re1->delta_g_off_min[1] = options->min_insert_size + re1->read_len - re2->window_len;
    re1->delta_g_off_max[1] = options->max_insert_size + re1->window_len - re2->read_len;
    re2->delta_g_off_min[0] = - re1->delta_g_off_max[0];
    re2->delta_g_off_max[0] = - re1->delta_g_off_min[0];
    re2->delta_g_off_min[1] = - re1->delta_g_off_max[1];
    re2->delta_g_off_max[1] = - re1->delta_g_off_min[1];
    */
    /* OPP_IN formulas shifted by re1's read length */
    re1->delta_g_off_min[0] = options->min_insert_size - re2->window_len;
    re1->delta_g_off_min[0] += re1->read_len;
    re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len) - re2->read_len;
    re1->delta_g_off_max[0] += re1->read_len;
    re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len + (re2->read_len - re2->window_len);
    re1->delta_g_off_min[1] -= re1->read_len;
    re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len;
    re1->delta_g_off_max[1] -= re1->read_len;
    re2->delta_g_off_min[0] = - re1->delta_g_off_max[0];
    re2->delta_g_off_max[0] = - re1->delta_g_off_min[0];
    re2->delta_g_off_min[1] = - re1->delta_g_off_max[1];
    re2->delta_g_off_max[1] = - re1->delta_g_off_min[1];
    break;
  default:
    assert(0);
    break;
  }
  /* Convert offset ranges to region-index ranges: mins are floored toward
     -infinity, maxes are ceiled toward +infinity, so the region interval
     always covers the offset interval (C integer division truncates toward
     zero, hence the separate negative-value branches). */
  re1->delta_region_min[0] = re1->delta_g_off_min[0] >= 0? re1->delta_g_off_min[0]/(1 << region_bits) : - 1 - (- re1->delta_g_off_min[0] - 1)/(1 << region_bits);
  re1->delta_region_max[0] = re1->delta_g_off_max[0] > 0? 1 + (re1->delta_g_off_max[0] - 1)/(1 << region_bits) : - (- re1->delta_g_off_max[0]/(1 << region_bits));
  re1->delta_region_min[1] = re1->delta_g_off_min[1] >= 0? re1->delta_g_off_min[1]/(1 << region_bits) : - 1 - (- re1->delta_g_off_min[1] - 1)/(1 << region_bits);
  re1->delta_region_max[1] = re1->delta_g_off_max[1] > 0? 1 + (re1->delta_g_off_max[1] - 1)/(1 << region_bits) : - (- re1->delta_g_off_max[1]/(1 << region_bits));
  re2->delta_region_min[0] = re2->delta_g_off_min[0] >= 0? re2->delta_g_off_min[0]/(1 << region_bits) : - 1 - (- re2->delta_g_off_min[0] - 1)/(1 << region_bits);
  re2->delta_region_max[0] = re2->delta_g_off_max[0] > 0? 1 + (re2->delta_g_off_max[0] - 1)/(1 << region_bits) : - (- re2->delta_g_off_max[0]/(1 << region_bits));
  re2->delta_region_min[1] = re2->delta_g_off_min[1] >= 0? re2->delta_g_off_min[1]/(1 << region_bits) : - 1 - (- re2->delta_g_off_min[1] - 1)/(1 << region_bits);
  re2->delta_region_max[1] = re2->delta_g_off_max[1] > 0? 1 + (re2->delta_g_off_max[1] - 1)/(1 << region_bits) : - (- re2->delta_g_off_max[1]/(1 << region_bits));
#ifdef DEBUG_DUMP_MP_RANGES
  fprintf(stderr, "mp_ranges read[%s]: goff_min[0]:%d goff_max[0]:%d goff_min[1]:%d goff_max[1]:%d reg_min[0]:%d reg_max[0]:%d reg_min[1]:%d reg_max[1]:%d\n",
	  re1->name,
	  re1->delta_g_off_min[0], re1->delta_g_off_max[0], re1->delta_g_off_min[1], re1->delta_g_off_max[1],
	  re1->delta_region_min[0], re1->delta_region_max[0], re1->delta_region_min[1], re1->delta_region_max[1]);
  fprintf(stderr, "mp_ranges read[%s]: goff_min[0]:%d goff_max[0]:%d goff_min[1]:%d goff_max[1]:%d reg_min[0]:%d reg_max[0]:%d reg_min[1]:%d reg_max[1]:%d\n",
	  re2->name,
	  re2->delta_g_off_min[0], re2->delta_g_off_max[0], re2->delta_g_off_min[1], re2->delta_g_off_max[1],
	  re2->delta_region_min[0], re2->delta_region_max[0], re2->delta_region_min[1], re2->delta_region_max[1]);
#endif
}
// Move paired hit structs to an area that persists until output is produced.
//
// Appends the read_hit_pair array to pe->final_paired_hits and deep-copies
// each distinct read_hit into the per-mate pools final_paired_hit_pool[0/1].
// Because several pair entries may share one read_hit pointer, every pair
// entry referencing a moved hit is rewritten to use a pool index (rh_idx)
// instead of the now-stale pointer, and the moved hit records in
// paired_hit_idx the indices of all pair entries that reference it.
static void
readpair_save_final_hits(pair_entry * pe, read_hit_pair * hits_pass2, int n_hits_pass2)
{
  int i, nip, j;
  read_hit * rhp;
  read_hit_pair * rhpp;
  // first, copy array of paired hit entries
  pe->final_paired_hits = (struct read_hit_pair *)
    my_realloc(pe->final_paired_hits,
	       (pe->n_final_paired_hits + n_hits_pass2) * sizeof(pe->final_paired_hits[0]),
	       pe->n_final_paired_hits * sizeof(pe->final_paired_hits[0]),
	       &mem_mapping, "final_paired_hits [%s,%s]", pe->re[0]->name, pe->re[1]->name);
  memcpy(&pe->final_paired_hits[pe->n_final_paired_hits], hits_pass2, n_hits_pass2 * sizeof(pe->final_paired_hits[0]));
  // next, copy read_hit entries to persistent pool
  for (i = 0; i < n_hits_pass2; i++) {
    for (nip = 0; nip < 2; nip++) {
      // a non-NULL rh pointer means this hit has not been moved yet
      if (pe->final_paired_hits[pe->n_final_paired_hits + i].rh[nip] != NULL) {
	// grow the pool by one entry and copy the hit in
	pe->final_paired_hit_pool[nip] = (read_hit *)
	  my_realloc(pe->final_paired_hit_pool[nip],
		     (pe->final_paired_hit_pool_size[nip] + 1) * sizeof(pe->final_paired_hit_pool[nip][0]),
		     pe->final_paired_hit_pool_size[nip] * sizeof(pe->final_paired_hit_pool[nip][0]),
		     &mem_mapping, "final_paired_hit_pool[%d] [%s,%s]", nip, pe->re[0]->name, pe->re[1]->name);
	pe->final_paired_hit_pool_size[nip]++;
	rhp = &pe->final_paired_hit_pool[nip][pe->final_paired_hit_pool_size[nip] - 1];
	memcpy(rhp, pe->final_paired_hits[pe->n_final_paired_hits + i].rh[nip], sizeof(read_hit));
	// the sfrp pointer was copied; delete old reference to prevent it from being freed too soon
	hits_pass2[i].rh[nip]->sfrp = NULL;
	// rewrite every later pair entry that points at this same hit:
	// replace the pointer with a pool index, and record the back-reference
	// in the moved hit's paired_hit_idx list
	for (j = i; j < n_hits_pass2; j++) {
	  rhpp = &pe->final_paired_hits[pe->n_final_paired_hits + j];
	  if (rhpp->rh[nip] == hits_pass2[i].rh[nip]) {
	    rhpp->rh[nip] = NULL;
	    rhpp->rh_idx[nip] = pe->final_paired_hit_pool_size[nip] - 1;
	    rhp->paired_hit_idx = (int *)
	      my_realloc(rhp->paired_hit_idx,
			 (rhp->n_paired_hit_idx + 1) * sizeof(rhp->paired_hit_idx[0]),
			 rhp->n_paired_hit_idx * sizeof(rhp->paired_hit_idx[0]),
			 &mem_mapping, "paired_hits [%s]", pe->re[nip]->name);
	    rhp->n_paired_hit_idx++;
	    rhp->paired_hit_idx[rhp->n_paired_hit_idx - 1] = pe->n_final_paired_hits + j;
	  }
	}
      }
    }
  }
  pe->n_final_paired_hits += n_hits_pass2;
}
/*
 * Top-level driver for mapping one read pair.
 *
 * Iterates over the given option sets until one produces enough good paired
 * mappings (readpair_pass2 returns true) or the sets are exhausted. If every
 * set fails and half_paired is enabled, falls back to unpaired mapping of
 * each mate. Finally emits output and optionally logs the reads to the
 * aligned/unaligned fasta files.
 */
void
handle_readpair(pair_entry * pe,
		struct readpair_mapping_options_t * options, int n_options)
{
  read_entry * re1 = pe->re[0];
  read_entry * re2 = pe->re[1];
  bool done;
  int option_index = 0;
  int i;
  struct read_hit_pair * hits_pass1 = NULL;
  struct read_hit_pair * hits_pass2 = NULL;
  int n_hits_pass1;
  int n_hits_pass2;
  llint before = gettimeinusecs();
  read_get_mapidxs(re1);
  read_get_mapidxs(re2);
  do {
    /* mate-pair offset/region ranges depend on the current option set */
    readpair_compute_mp_ranges(re1, re2, &options[option_index].pairing);
    if (options[option_index].read[0].regions.recompute || options[option_index].read[1].regions.recompute) {
      /* bump the region map generation id (wraps modulo 2^region_map_id_bits) */
      region_map_id++;
      region_map_id &= ((1 << region_map_id_bits) - 1);
      /* region counts per mate and per strand index (0/1) */
      read_get_region_counts(re1, 0, &options[option_index].read[0].regions);
      read_get_region_counts(re1, 1, &options[option_index].read[0].regions);
      read_get_region_counts(re2, 0, &options[option_index].read[1].regions);
      read_get_region_counts(re2, 1, &options[option_index].read[1].regions);
      if (options[option_index].read[0].anchor_list.use_mp_region_counts) {
	read_get_mp_region_counts(re1, 0);
	read_get_mp_region_counts(re1, 1);
      }
      if (options[option_index].read[1].anchor_list.use_mp_region_counts) {
	read_get_mp_region_counts(re2, 0);
	read_get_mp_region_counts(re2, 1);
      }
    }
    /* rebuild anchor lists / hit lists only when the option set asks for it */
    if (options[option_index].read[0].anchor_list.recompute) {
      read_free_anchor_list(re1, &mem_mapping);
      read_get_anchor_list(re1, &options[option_index].read[0].anchor_list);
    }
    if (options[option_index].read[1].anchor_list.recompute) {
      read_free_anchor_list(re2, &mem_mapping);
      read_get_anchor_list(re2, &options[option_index].read[1].anchor_list);
    }
    if (options[option_index].read[0].hit_list.recompute) {
      read_free_hit_list(re1, &mem_mapping);
      read_get_hit_list(re1, &options[option_index].read[0].hit_list);
    }
    if (options[option_index].read[1].hit_list.recompute) {
      read_free_hit_list(re2, &mem_mapping);
      read_get_hit_list(re2, &options[option_index].read[1].hit_list);
    }
    readpair_pair_up_hits(re1, re2);
    if (options[option_index].read[0].pass1.recompute) {
      read_pass1(re1, &options[option_index].read[0].pass1);
    }
    if (options[option_index].read[1].pass1.recompute) {
      read_pass1(re2, &options[option_index].read[1].pass1);
    }
    /* both scratch arrays are sized by pass1_num_outputs; pass2 only ever
       keeps a subset of pass1's output */
    hits_pass1 = (struct read_hit_pair *)
      my_malloc(options[option_index].pairing.pass1_num_outputs * sizeof(hits_pass1[0]), &mem_mapping, "hits_pass1 [%s,%s]", re1->name, re2->name);
    n_hits_pass1 = 0;
    readpair_get_vector_hits(re1, re2, hits_pass1, &n_hits_pass1, &options[option_index].pairing);
    hits_pass2 = (struct read_hit_pair *)
      my_malloc(options[option_index].pairing.pass1_num_outputs * sizeof(hits_pass2[0]), &mem_mapping, "hits_pass2 [%s,%s]", re1->name, re2->name);
    n_hits_pass2 = 0;
    done = readpair_pass2(re1, re2, hits_pass1, n_hits_pass1, hits_pass2, &n_hits_pass2, &options[option_index].pairing,
			  &options[option_index].read[0].pass2, &options[option_index].read[1].pass2);
    if (n_hits_pass2 > 0) {
      if (options[option_index].pairing.save_outputs)
	readpair_save_final_hits(pe, hits_pass2, n_hits_pass2);
      else {
	/* output immediately and release the SW results */
	readpair_output_no_mqv(pe, hits_pass2, n_hits_pass2);
	for (i = 0; i < n_hits_pass2; i++) {
	  hits_pass2[i].rh[0]->sfrp->in_use = false;
	  hits_pass2[i].rh[1]->sfrp->in_use = false;
	}
      }
      pe->mapped = true;
    }
    /* free SW results of pass-1 hits that were not kept */
    for (i = 0; i < n_hits_pass1; i++) {
      if (hits_pass1[i].rh[0]->sfrp != NULL && !hits_pass1[i].rh[0]->sfrp->in_use)
	free_sfrp(&hits_pass1[i].rh[0]->sfrp, re1, &mem_mapping);
      if (hits_pass1[i].rh[1]->sfrp != NULL && !hits_pass1[i].rh[1]->sfrp->in_use)
	free_sfrp(&hits_pass1[i].rh[1]->sfrp, re2, &mem_mapping);
    }
    my_free(hits_pass1, options[option_index].pairing.pass1_num_outputs * sizeof(hits_pass1[0]), &mem_mapping, "hits_pass1 [%s,%s]", re1->name, re2->name);
    my_free(hits_pass2, options[option_index].pairing.pass1_num_outputs * sizeof(hits_pass2[0]), &mem_mapping, "hits_pass2 [%s,%s]", re1->name, re2->name);
  } while (!done && ++option_index < n_options);
  tpg.read_handle_usecs += gettimeinusecs() - before;
  if (option_index >= n_options && half_paired) {
    // this read pair fell through all the option sets; try unpaired mapping
    handle_read(re1, unpaired_mapping_options[0], n_unpaired_mapping_options[0]);
    handle_read(re2, unpaired_mapping_options[1], n_unpaired_mapping_options[1]);
  }
  // OUTPUT
  readpair_output(pe);
  if (aligned_reads_file != NULL && (pe->mapped || re1->mapped || re2->mapped)) {
#pragma omp critical (aligned_reads_file)
    {
      fasta_write_read(aligned_reads_file, re1);
      fasta_write_read(aligned_reads_file, re2);
    }
  }
  if ((unaligned_reads_file != NULL || sam_unaligned) && !(pe->mapped || re1->mapped || re2->mapped)) {
#pragma omp critical (unaligned_reads_file)
    {
      if (unaligned_reads_file != NULL) {
	fasta_write_read(unaligned_reads_file, re1);
	fasta_write_read(unaligned_reads_file, re2);
      }
    }
    if (sam_unaligned) {
      /* NOTE(review): the fourth argument differs between mates (true/false)
	 -- presumably a first-in-pair flag; confirm against hit_output */
      hit_output(re1, NULL, NULL, true, NULL, 0);
      hit_output(re2, NULL, NULL, false, NULL, 0);
    }
  }
}
|
9953.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization: fill A with the deterministic pattern (i + j) / nj. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  for (row = 0; row < ni; row++) {
    for (col = 0; col < nj; col++) {
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
    }
  }
}
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   A newline is emitted every 20 values, counting in row-major order with
   the compile-time stride NJ. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int r, c;

  for (r = 0; r < ni; r++) {
    for (c = 0; c < nj; c++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
      if ((r * NJ + c) % 20 == 0)
	fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   2-D convolution with a fixed 3x3 stencil: each interior point of B is a
   weighted sum of the 3x3 neighborhood of A. Border rows/columns of B are
   not written by this kernel. */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /* collapse(2) flattens the perfectly nested i/j loops into one iteration
     space (private(j) is redundant under collapse but harmless).
     NOTE(review): the thread count is hardwired to 2 and the chunk size to 1
     (schedule(static, 1)) -- presumably deliberate for this experiment;
     confirm before reusing elsewhere. */
#pragma omp parallel for private(j) collapse(2) schedule(static, 1) num_threads(2)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
/* Benchmark harness: allocate, initialize, time the kernel, and print the
   live-out array to prevent dead-code elimination. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
sparseblockmat_omp_kernels.h | #include <omp.h>
#include "config.h"
//for the fmas it is important to activate -mfma compiler flag
namespace dg{
// General ELL block-sparse multiply: y = alpha*M*x + beta*y.
// The matrix consists of num_rows x num_cols blocks of size n x n with
// blocks_per_line non-zero blocks per block row; cols_idx/data_idx give the
// block column and the index into the shared data array for each of them.
// left_size/right_size are the outer tensor dimensions; only right indices
// in [right_range[0], right_range[1]) are touched.
// NOTE(review): contains a bare "#pragma omp for", so this must be called
// from inside an enclosing omp parallel region -- confirm at the call sites.
template<class value_type>
void ell_multiply_kernel( value_type alpha, value_type beta,
         const value_type * RESTRICT data, const int * RESTRICT cols_idx,
         const int * RESTRICT data_idx,
         const int num_rows, const int num_cols, const int blocks_per_line,
         const int n,
         const int left_size, const int right_size,
         const int * RESTRICT right_range,
         const value_type * RESTRICT x, value_type * RESTRICT y
         )
{
#pragma omp for nowait //manual collapse(2)
    for( int si = 0; si<left_size*num_rows; si++)
    {
        // decompose the flattened index into (left, block-row)
        int s = si / num_rows;
        int i = si % num_rows;
#ifdef _MSC_VER //MSVC does not support variable length arrays...
        int* J = (int*)alloca(blocks_per_line * sizeof(int));
#else
        int J[blocks_per_line];
#endif
        // starting x-offset of each block in this row
        for( int d=0; d<blocks_per_line; d++)
            J[d] = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
        for( int k=0; k<n; k++)
        {
#ifdef _MSC_VER
            int* B = (int*)alloca(blocks_per_line * sizeof(int));
#else
            int B[blocks_per_line];
#endif
            // starting data-offset of row k of each block
            for( int d=0; d<blocks_per_line; d++)
                B[d] = (data_idx[i*blocks_per_line+d]*n+k)*n;
            for( int j=right_range[0]; j<right_range[1]; j++)
            {
                int I = ((s*num_rows + i)*n+k)*right_size+j;
                // if y[I] is nan then even beta = 0 does not make it 0
                y[I] = beta == 0 ? (value_type)0 : y[I]*beta;
                for( int d=0; d<blocks_per_line; d++)
                {
                    value_type temp = 0;
                    for( int q=0; q<n; q++) //multiplication-loop
                        temp = DG_FMA(data[ B[d]+q],
                                x[(J[d]+q)*right_size+j],
                                temp);
                    y[I] = DG_FMA(alpha, temp, y[I]);
                }
            }
        }
    }
}
//specialized multiply kernel
// Same computation as the general kernel (y = alpha*M*x + beta*y), but with
// the block size n and blocks_per_line fixed at compile time, which lets the
// compiler fully unroll the innermost loops. The parallelization strategy
// depends on which tensor direction is largest.
// NOTE(review): uses bare "#pragma omp for", so it must run inside an
// enclosing omp parallel region.
template<class value_type, int n, int blocks_per_line>
void ell_multiply_kernel( value_type alpha, value_type beta,
         const value_type * RESTRICT data, const int * RESTRICT cols_idx,
         const int * RESTRICT data_idx,
         const int num_rows, const int num_cols,
         const int left_size, const int right_size,
         const int * RESTRICT right_range,
         const value_type * RESTRICT x, value_type * RESTRICT y
         )
{
    //basically we check which direction is the largest and parallelize that one
    if(right_size==1)
    {
        //trivial means that the data blocks do not change among (interior) rows
        bool trivial = true;
        for( int i=1; i<num_rows-1; i++)
            for( int d=0; d<blocks_per_line; d++)
            {
                if( data_idx[i*blocks_per_line+d]
                        != data_idx[blocks_per_line+d]) trivial = false;
            }
        if(trivial)
        {
            value_type xprivate[blocks_per_line*n];
            value_type dprivate[blocks_per_line*n*n];
            // hoist the row-independent data blocks into a k-major private
            // copy (taken from row 1, a representative interior row)
            for( int d=0; d<blocks_per_line; d++)
            for( int k=0; k<n; k++)
            for( int q=0; q<n; q++)
            {
                int B = data_idx[blocks_per_line+d];
                dprivate[(k*blocks_per_line+d)*n+q] = data[(B*n+k)*n+q];
            }
            #pragma omp for nowait
            for( int s=0; s<left_size; s++)
            {
                // first row (i == 0): general path with per-row data blocks
                for( int i=0; i<1; i++)
                {
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        int J = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                        for(int q=0; q<n; q++)
                            xprivate[d*n+q] = x[J+q];
                    }
                    for( int k=0; k<n; k++)
                    {
                        value_type temp[blocks_per_line] = {0};
                        for( int d=0; d<blocks_per_line; d++)
                        {
                            int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                            for( int q=0; q<n; q++) //multiplication-loop
                                temp[d] = DG_FMA(data[B+q], xprivate[d*n+q], temp[d]);
                        }
                        int I = ((s*num_rows + i)*n+k);
                        // if y[I] is nan then even beta = 0 does not make it 0
                        y[I] = beta == 0 ? (value_type)0 : y[I]*beta;
                        for( int d=0; d<blocks_per_line; d++)
                            y[I] = DG_FMA(alpha, temp[d], y[I]);
                    }
                }
                // interior rows: use the hoisted dprivate copy
                // NOTE(review): SIMD is presumably a macro from config.h
                // expanding to "simd" (or empty) -- confirm
#ifndef _MSC_VER
                #pragma omp SIMD //very important for KNL
#endif
                for( int i=1; i<num_rows-1; i++)
                {
                    for( int k=0; k<n; k++)
                    {
                        int I = ((s*num_rows + i)*n+k);
                        // if y[I] is nan then even beta = 0 does not make it 0
                        y[I] = beta == 0 ? (value_type)0 : y[I]*beta;
                        int B = n*blocks_per_line*k;
                        for( int d=0; d<blocks_per_line; d++)
                        {
                            value_type temp = 0;
                            for( int q=0; q<n; q++)
                            {
                                int J = (s*num_cols+cols_idx[i*blocks_per_line+d])*n+q;
                                temp = DG_FMA( dprivate[B+d*n+q], x[J], temp);
                            }
                            y[I] = DG_FMA(alpha, temp, y[I]);
                        }
                    }
                }
                // last row (i == num_rows-1): general path again
                for( int i=num_rows-1; i<num_rows; i++)
                {
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        int J = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                        for(int q=0; q<n; q++)
                            xprivate[d*n+q] = x[J+q];
                    }
                    for( int k=0; k<n; k++)
                    {
                        value_type temp[blocks_per_line] = {0};
                        for( int d=0; d<blocks_per_line; d++)
                        {
                            int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                            for( int q=0; q<n; q++) //multiplication-loop
                                temp[d] = DG_FMA( data[B+q], xprivate[d*n+q], temp[d]);
                        }
                        int I = ((s*num_rows + i)*n+k);
                        // if y[I] is nan then even beta = 0 does not make it 0
                        y[I] = beta == 0 ? (value_type)0 : y[I]*beta;
                        for( int d=0; d<blocks_per_line; d++)
                            y[I] = DG_FMA(alpha, temp[d], y[I]);
                    }
                }
            }
        } //trivial
        else // not trivial
        {
            value_type xprivate[blocks_per_line*n];
            #pragma omp for nowait
            for( int s=0; s<left_size; s++)
            for( int i=0; i<num_rows; i++)
            {
                for( int d=0; d<blocks_per_line; d++)
                {
                    int J = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                    for(int q=0; q<n; q++)
                        xprivate[d*n+q] = x[J+q];
                }
                for( int k=0; k<n; k++)
                {
                    value_type temp[blocks_per_line] = {0};
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                        for( int q=0; q<n; q++) //multiplication-loop
                            temp[d] = DG_FMA( data[B+q], xprivate[d*n+q], temp[d]);
                    }
                    int I = ((s*num_rows + i)*n+k);
                    // if y[I] is nan then even beta = 0 does not make it 0
                    y[I] = beta == 0 ? (value_type)0 : y[I]*beta;
                    for( int d=0; d<blocks_per_line; d++)
                        y[I] = DG_FMA(alpha, temp[d], y[I]);
                }
            }
        }// not trivial
    }// right_size==1
    else // right_size != 1
    {
        value_type dprivate[blocks_per_line*n];
        int J[blocks_per_line];
        if( !( (right_range[1]-right_range[0]) > 100*left_size*num_rows*n )) //typically a derivative in y ( Ny*Nz >~ Nx)
        {
            // work-share over the (left, row, k) space; vectorize over j
            #pragma omp for nowait
            for (int sik = 0; sik < left_size*num_rows*n; sik++)
            {
                int s = sik / (num_rows*n);
                int i = (sik % (num_rows*n)) / n;
                int k = (sik % (num_rows*n)) % n;
                for( int d=0; d<blocks_per_line; d++)
                {
                    J[d] = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                    int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                    for(int q=0; q<n; q++)
                        dprivate[d*n+q] = data[B+q];
                }
#ifndef _MSC_VER
                #pragma omp SIMD //very important for KNL
#endif
                for( int j=right_range[0]; j<right_range[1]; j++)
                {
                    int I = ((s*num_rows + i)*n+k)*right_size+j;
                    // if y[I] is nan then even beta = 0 does not make it 0
                    y[I] = beta == 0 ? (value_type)0 : y[I]*beta;
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        value_type temp = 0;
                        int Jd = J[d];
                        for( int q=0; q<n; q++) //multiplication-loop
                            temp = DG_FMA( dprivate[ d*n+q],
                                    x[(Jd+q)*right_size+j],
                                    temp);
                        y[I] = DG_FMA(alpha, temp, y[I]);
                    }
                }
            }
        }
        else //typically a derivative in z (since n*n*Nx*Ny > 100*Nz)
        {
            // every thread runs all sik iterations; the work-sharing happens
            // on the (large) inner j loop instead
            for (int sik = 0; sik < left_size*num_rows*n; sik++)
            {
                int s = sik / (num_rows*n);
                int i = (sik % (num_rows*n)) / n;
                int k = (sik % (num_rows*n)) % n;
                for( int d=0; d<blocks_per_line; d++)
                {
                    J[d] = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                    int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                    for(int q=0; q<n; q++)
                        dprivate[d*n+q] = data[B+q];
                }
                #pragma omp for SIMD nowait
                for( int j=right_range[0]; j<right_range[1]; j++)
                {
                    int I = ((s*num_rows + i)*n+k)*right_size+j;
                    // if y[I] is nan then even beta = 0 does not make it 0
                    y[I] = beta == 0 ? (value_type)0 : y[I]*beta;
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        value_type temp = 0;
                        int Jd = J[d];
                        for( int q=0; q<n; q++) //multiplication-loop
                            temp = DG_FMA( dprivate[ d*n+q],
                                    x[(Jd+q)*right_size+j],
                                    temp);
                        y[I] = DG_FMA(alpha, temp, y[I]);
                    }
                }
            }
        }
    }
}
template<class value_type, int n>
void call_ell_multiply_kernel( value_type alpha, value_type beta,
const value_type * RESTRICT data_ptr, const int * RESTRICT cols_ptr,
const int * RESTRICT block_ptr,
const int num_rows, const int num_cols, const int blocks_per_line,
const int left_size, const int right_size,
const int * RESTRICT right_range_ptr,
const value_type * RESTRICT x_ptr, value_type * RESTRICT y_ptr)
{
if( blocks_per_line == 1)
ell_multiply_kernel<value_type, n, 1> (alpha, beta, data_ptr,
cols_ptr, block_ptr, num_rows, num_cols, left_size, right_size,
right_range_ptr, x_ptr,y_ptr);
else if (blocks_per_line == 2)
ell_multiply_kernel<value_type, n, 2> (alpha, beta, data_ptr,
cols_ptr, block_ptr, num_rows, num_cols, left_size, right_size,
right_range_ptr, x_ptr,y_ptr);
else if (blocks_per_line == 3)
ell_multiply_kernel<value_type, n, 3> (alpha, beta, data_ptr,
cols_ptr, block_ptr, num_rows, num_cols, left_size, right_size,
right_range_ptr, x_ptr,y_ptr);
else if (blocks_per_line == 4)
ell_multiply_kernel<value_type, n, 4> (alpha, beta, data_ptr,
cols_ptr, block_ptr, num_rows, num_cols, left_size, right_size,
right_range_ptr, x_ptr,y_ptr);
else
ell_multiply_kernel<value_type> (alpha, beta, data_ptr, cols_ptr,
block_ptr, num_rows, num_cols, blocks_per_line, n, left_size,
right_size, right_range_ptr, x_ptr,y_ptr);
}
// Dispatch the ELL multiply to an n-specialized kernel for the common block
// sizes (1..5); any other n uses the fully generic kernel.
template<class value_type>
void EllSparseBlockMatDevice<value_type>::launch_multiply_kernel( value_type alpha, const value_type* x_ptr, value_type beta, value_type* y_ptr) const
{
    const value_type* data_ptr = thrust::raw_pointer_cast( &data[0]);
    const int* cols_ptr = thrust::raw_pointer_cast( &cols_idx[0]);
    const int* block_ptr = thrust::raw_pointer_cast( &data_idx[0]);
    const int* right_range_ptr = thrust::raw_pointer_cast( &right_range[0]);
    switch( n)
    {
        case 1:
            call_ell_multiply_kernel<value_type, 1> (alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        case 2:
            call_ell_multiply_kernel<value_type, 2> (alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        case 3:
            call_ell_multiply_kernel<value_type, 3> (alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        case 4:
            call_ell_multiply_kernel<value_type, 4> (alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        case 5:
            call_ell_multiply_kernel<value_type, 5> (alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        default:
            ell_multiply_kernel<value_type> ( alpha, beta, data_ptr, cols_ptr,
                block_ptr, num_rows, num_cols, blocks_per_line, n, left_size,
                right_size, right_range_ptr, x_ptr, y_ptr);
            break;
    }
}
// Generic COO block kernel: for every entry of m, accumulates
// y[I] += alpha * (block * x-slice), work-shared over the flattened
// (left, k, right) index space.
// NOTE(review): the beta parameter is accepted but never applied -- unlike
// the ell kernels there is no "y[I] *= beta" step, so callers presumably
// pass beta == 1 or pre-scale y; confirm against the call sites.
// NOTE(review): bare "#pragma omp for" -- must run inside an enclosing omp
// parallel region.
template<class value_type>
void coo_multiply_kernel( value_type alpha, const value_type** x, value_type beta, value_type* RESTRICT y, const CooSparseBlockMatDevice<value_type>& m )
{
#pragma omp for nowait
    for (int skj = 0; skj < m.left_size*m.n*m.right_size; skj++)
    {
        // decompose the flattened index into (left, block-row-within-block, right)
        int s = skj / (m.n*m.right_size);
        int k = (skj % (m.n*m.right_size)) / m.right_size;
        int j = (skj % (m.n*m.right_size)) % m.right_size;
        for (int i = 0; i < m.num_entries; i++)
        {
            int I = ((s*m.num_rows + m.rows_idx[i])*m.n + k)*m.right_size + j;
            value_type temp = 0;
            for (int q = 0; q < m.n; q++) //multiplication-loop
                temp = DG_FMA(m.data[(m.data_idx[i] * m.n + k)*m.n + q],
                        x[m.cols_idx[i]][(q*m.left_size +s )*m.right_size+j],
                        temp);
            y[I] = DG_FMA(alpha, temp, y[I]);
        }
    }
}
// Compile-time block-size specialization of the COO multiply.  If the
// column and data indices form consecutive runs (cols_idx[i] == cols_idx[0]+i
// and data_idx[i] == data_idx[0]+i) the "trivial" branch strength-reduces
// the index computations; otherwise the general branch is taken.
// NOTE(review): beta is accepted but never applied to y (same as the
// run-time-n overload above) -- presumably y is pre-scaled by the caller.
template<class value_type, int n>
void coo_multiply_kernel( value_type alpha, const value_type** x, value_type beta, value_type* RESTRICT y, const CooSparseBlockMatDevice<value_type>& m )
{
// Detect the consecutive-run layout (could break early; kept as-is).
bool trivial = true;
int CC = m.cols_idx[0], DD = m.data_idx[0];
for( int i=0; i<m.num_entries; i++)
if( CC+i != m.cols_idx[i] || DD+i != m.data_idx[i])
trivial=false;
if( trivial)
{
#pragma omp for SIMD nowait
for (int sj = 0; sj < m.left_size*m.right_size; sj++)
{
int s = sj / m.right_size;
// NOTE(review): the second "% m.right_size" is redundant -- sj % right_size
// is already < right_size.  Harmless; left unchanged here.
int j = (sj % m.right_size) % m.right_size;
for( int k=0; k<n; k++)
{
for (int i = 0; i < m.num_entries; i++)
{
int I = ((s*m.num_rows + m.rows_idx[i])*n + k)*m.right_size + j;
// With the trivial layout, data_idx[i] == DD+i and cols_idx[i] == CC+i.
int DDD = ((DD +i)*n+k)*n, CCC = CC+i;
value_type temp = 0;
for (int q = 0; q < n; q++) //multiplication-loop
temp = DG_FMA(m.data[DDD + q],
x[CCC][q*m.left_size*m.right_size +sj],
temp);
y[I] = DG_FMA(alpha, temp, y[I]);
}
}
}
}
else
{
#pragma omp for SIMD nowait
for (int sj = 0; sj < m.left_size*m.right_size; sj++)
{
int s = sj / m.right_size;
int j = (sj % m.right_size) % m.right_size;
for( int k=0; k<n; k++)
{
for (int i = 0; i < m.num_entries; i++)
{
int I = ((s*m.num_rows + m.rows_idx[i])*n + k)*m.right_size + j;
value_type temp = 0;
for (int q = 0; q < n; q++) //multiplication-loop
temp = DG_FMA(m.data[(m.data_idx[i] * n + k)*n + q],
x[m.cols_idx[i]][q*m.left_size*m.right_size +sj],
temp);
y[I] = DG_FMA(alpha, temp, y[I]);
}
}
}
}
}
template<class value_type>
void CooSparseBlockMatDevice<value_type>::launch_multiply_kernel( value_type alpha, const value_type** x, value_type beta, value_type* RESTRICT y) const
{
    // Dispatch on the block size n: sizes 1..4 use a compile-time
    // specialization, everything else falls back to the run-time-n kernel.
    switch( n)
    {
        case 1:  coo_multiply_kernel<value_type, 1>( alpha, x, beta, y, *this); break;
        case 2:  coo_multiply_kernel<value_type, 2>( alpha, x, beta, y, *this); break;
        case 3:  coo_multiply_kernel<value_type, 3>( alpha, x, beta, y, *this); break;
        case 4:  coo_multiply_kernel<value_type, 4>( alpha, x, beta, y, *this); break;
        default: coo_multiply_kernel<value_type>( alpha, x, beta, y, *this);    break;
    }
}
}//namespace dg
|
GB_unaryop__lnot_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_fp32
// op(A') function: GB_tran__lnot_fp32_fp32
// C type: float
// A type: float
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
// type of the entries of the input matrix A
#define GB_ATYPE \
float
// type of the entries of the output matrix C
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// access an entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = !(Ax [p] != 0) for all p in [0, anz), in parallel.
GrB_Info GB_unop__lnot_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* flags; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the actual loop lives in the shared template GB_unaryop_transpose.c,
// specialized here through the GB_* macros defined above
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mvm_nkn32.h | // Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef _H_MVM_NKN32
#define _H_MVM_NKN32
#include "tensor_desc.h"
#include "thread_affinity.h"
// mvm_nkn32: matrix-vector multiply-accumulate for a filter pre-packed in
// tiles of 32: for each n in [0, fn) and each j in [0, 32),
//   output[n*32 + j] += sum_{k<fk} filterArray[(n*fk + k)*32 + j] * input[k].
// The fn output tiles are independent, hence the parallel for.
// NOTE(review): the hand-scheduled assembly below assumes fk >= 2 (the
// aarch64 main loop consumes two input elements per iteration before the
// first counter check) -- confirm with callers.
inline void mvm_nkn32(U32 fn, U32 fk, const F32 *filterArray, F32 *input, F32 *output)
{
#ifdef _USE_OPENMP
#pragma omp parallel for num_threads(OMP_NUM_THREADS)
#endif
for (U32 n = 0; n < fn; n++) {
F32 *in = input;
const F32 *f = filterArray + n * fk * 32;
F32 *out = output + n * 32;
// AArch64 path: the 32-float output tile lives in q1-q4,q13-q16; the loop
// processes two filter rows (2*128 bytes) per iteration with interleaved
// loads (ldr d / ldr x / ins) and lane-broadcast fmla.
#ifdef __aarch64__
__asm__ __volatile__("ldr d0, [%[in]]\n"
"ldr q1, [%[out]]\n"
"ldr q2, [%[out], #16]\n"
"ldr q3, [%[out], #32]\n"
"ldr q4, [%[out], #48]\n"
"ldr q13, [%[out], #64]\n"
"ldr q14, [%[out], #80]\n"
"ldr q15, [%[out], #96]\n"
"ldr q16, [%[out], #112]\n"
"mov x0, %[k]\n"
"ldr q5, [%[f]]\n"
"ldr q6, [%[f], #16]\n"
"ldr q7, [%[f], #32]\n"
"ldr q8, [%[f], #48]\n"
"ldr q17, [%[f], #64]\n"
"ldr q18, [%[f], #80]\n"
"ldr q19, [%[f], #96]\n"
"ldr q20, [%[f], #112]\n"
"0:\n"
"prfm pldl2strm, [%[f], #4096]\n"
"prfm pldl1strm, [%[f], #1024]\n"
"ldr d9, [%[f], #128]\n"
"fmla v1.4s, v5.4s, v0.s[0]\n"
"ldr x9, [%[f], #136]\n"
"ins v9.d[1], x9\n"
"ldr d10, [%[f], #144]\n"
"fmla v2.4s, v6.4s, v0.s[0]\n"
"ldr x10, [%[f], #152]\n"
"ins v10.d[1], x10\n"
"ldr d11, [%[f], #160]\n"
"fmla v3.4s, v7.4s, v0.s[0]\n"
"ldr x11, [%[f], #168]\n"
"ins v11.d[1], x11\n"
"ldr d12, [%[f], #176]\n"
"fmla v4.4s, v8.4s, v0.s[0]\n"
"ldr x12, [%[f], #184]\n"
"ins v12.d[1], x12\n"
"ldr d21, [%[f], #192]\n"
"fmla v13.4s, v17.4s, v0.s[0]\n"
"ldr x9, [%[f], #200]\n"
"ins v21.d[1], x9\n"
"ldr d22, [%[f], #208]\n"
"fmla v14.4s, v18.4s, v0.s[0]\n"
"ldr x10, [%[f], #216]\n"
"ins v22.d[1], x10\n"
"ldr d23, [%[f], #224]\n"
"fmla v15.4s, v19.4s, v0.s[0]\n"
"ldr x11, [%[f], #232]\n"
"ins v23.d[1], x11\n"
"ldr d24, [%[f], #240]\n"
"fmla v16.4s, v20.4s, v0.s[0]\n"
"ldr x12, [%[f], #248]\n"
"ins v24.d[1], x12\n"
"add %[f], %[f], #256\n"
"ldr d5, [%[f]]\n"
"fmla v1.4s, v9.4s, v0.s[1]\n"
"ldr x5, [%[f], #8]\n"
"ins v5.d[1], x5\n"
"ldr d6, [%[f], #16]\n"
"fmla v2.4s, v10.4s, v0.s[1]\n"
"ldr x6, [%[f], #24]\n"
"ins v6.d[1], x6\n"
"ldr d7, [%[f], #32]\n"
"fmla v3.4s, v11.4s, v0.s[1]\n"
"ldr x7, [%[f], #40]\n"
"ins v7.d[1], x7\n"
"ldr d8, [%[f], #48]\n"
"fmla v4.4s, v12.4s, v0.s[1]\n"
"ldr x8, [%[f], #56]\n"
"ins v8.d[1], x8\n"
"ldr d17, [%[f], #64]\n"
"fmla v13.4s, v21.4s, v0.s[1]\n"
"ldr x5, [%[f], #72]\n"
"ins v17.d[1], x5\n"
"ldr d18, [%[f], #80]\n"
"fmla v14.4s, v22.4s, v0.s[1]\n"
"ldr x6, [%[f], #88]\n"
"ins v18.d[1], x6\n"
"ldr d19, [%[f], #96]\n"
"fmla v15.4s, v23.4s, v0.s[1]\n"
"ldr x7, [%[f], #104]\n"
"ins v19.d[1], x7\n"
"ldr d20, [%[f], #112]\n"
"fmla v16.4s, v24.4s, v0.s[1]\n"
"ldr x8, [%[f], #120]\n"
"add %[in], %[in], #8\n"
"ins v20.d[1], x8\n"
"ldr d0, [%[in]]\n"
"sub x0, x0, #2\n"
"cmp x0, #3\n"
"bgt 0b\n"
"ldr q9, [%[f], #128]\n"
"ldr q10, [%[f], #144]\n"
"ldr q11, [%[f], #160]\n"
"ldr q12, [%[f], #176]\n"
"ldr q21, [%[f], #192]\n"
"ldr q22, [%[f], #208]\n"
"ldr q23, [%[f], #224]\n"
"ldr q24, [%[f], #240]\n"
"fmla v1.4s, v5.4s, v0.s[0]\n"
"fmla v2.4s, v6.4s, v0.s[0]\n"
"fmla v3.4s, v7.4s, v0.s[0]\n"
"fmla v4.4s, v8.4s, v0.s[0]\n"
"fmla v13.4s, v17.4s, v0.s[0]\n"
"fmla v14.4s, v18.4s, v0.s[0]\n"
"fmla v15.4s, v19.4s, v0.s[0]\n"
"fmla v16.4s, v20.4s, v0.s[0]\n"
"fmla v1.4s, v9.4s, v0.s[1]\n"
"fmla v2.4s, v10.4s, v0.s[1]\n"
"fmla v3.4s, v11.4s, v0.s[1]\n"
"fmla v4.4s, v12.4s, v0.s[1]\n"
"fmla v13.4s, v21.4s, v0.s[1]\n"
"fmla v14.4s, v22.4s, v0.s[1]\n"
"fmla v15.4s, v23.4s, v0.s[1]\n"
"fmla v16.4s, v24.4s, v0.s[1]\n"
"cmp x0, #3\n"
"bne 1f\n"
"add %[f], %[f], #256\n"
"ldr s0, [%[in], #8]\n"
"ldr q5, [%[f]]\n"
"ldr q6, [%[f], #16]\n"
"ldr q7, [%[f], #32]\n"
"ldr q8, [%[f], #48]\n"
"ldr q17, [%[f], #64]\n"
"ldr q18, [%[f], #80]\n"
"ldr q19, [%[f], #96]\n"
"ldr q20, [%[f], #112]\n"
"fmla v1.4s, v5.4s, v0.s[0]\n"
"fmla v2.4s, v6.4s, v0.s[0]\n"
"fmla v3.4s, v7.4s, v0.s[0]\n"
"fmla v4.4s, v8.4s, v0.s[0]\n"
"fmla v13.4s, v17.4s, v0.s[0]\n"
"fmla v14.4s, v18.4s, v0.s[0]\n"
"fmla v15.4s, v19.4s, v0.s[0]\n"
"fmla v16.4s, v20.4s, v0.s[0]\n"
"1:\n"
"str q1, [%[out]]\n"
"str q2, [%[out], #16]\n"
"str q3, [%[out], #32]\n"
"str q4, [%[out], #48]\n"
"str q13, [%[out], #64]\n"
"str q14, [%[out], #80]\n"
"str q15, [%[out], #96]\n"
"str q16, [%[out], #112]\n"
: [out] "+r"(out), [f] "+r"(f), [in] "+r"(in)
: [k] "r"((I64)fk)
: "memory", "cc", "x0", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
"x12", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23", "v24");
// ARMv7 NEON path: output tile in q1-q8, filter rows streamed through
// q9-q14, next input element staged in q15/d30.
#else
__asm__ __volatile__("vld1.f32 {d0[0]}, [%[in]]!\n"
"mov r2, %[out]\n"
"mov r3, %[out]\n"
"vld1.f32 {d2-d3}, [r2]!\n"
"vld1.f32 {d4-d5}, [r2]!\n"
"vld1.f32 {d6-d7}, [r2]!\n"
"vld1.f32 {d8-d9}, [r2]!\n"
"vld1.f32 {d18-d19}, [%[f]]!\n"
"vld1.f32 {d20-d21}, [%[f]]!\n"
"vld1.f32 {d22-d23}, [%[f]]!\n"
"vld1.f32 {d24-d25}, [%[f]]!\n"
"mov r4, %[k]\n"
"vld1.f32 {d10-d11}, [r2]!\n"
"vld1.f32 {d12-d13}, [r2]!\n"
"vld1.f32 {d14-d15}, [r2]!\n"
"vld1.f32 {d16-d17}, [r2]\n"
"vld1.f32 {d26-d27}, [%[f]]!\n"
"vld1.f32 {d28-d29}, [%[f]]!\n"
"0:\n"
"cmp r4, #3\n"
"ble 3f\n"
"pld [%[f], #374]\n"
"vld1.f32 {d30[0]}, [%[in]]!\n"
"vmla.f32 q1, q9, d0[0]\n"
"vld1.f32 {d18-d19}, [%[f]]!\n"
"vmla.f32 q2, q10, d0[0]\n"
"vld1.f32 {d20-d21}, [%[f]]!\n"
"vmla.f32 q3, q11, d0[0]\n"
"vld1.f32 {d22-d23}, [%[f]]!\n"
"vmla.f32 q4, q12, d0[0]\n"
"vld1.f32 {d24-d25}, [%[f]]!\n"
"vmla.f32 q5, q13, d0[0]\n"
"vld1.f32 {d26-d27}, [%[f]]!\n"
"vmla.f32 q6, q14, d0[0]\n"
"vld1.f32 {d28-d29}, [%[f]]!\n"
"vmla.f32 q7, q9, d0[0]\n"
"vld1.f32 {d18-d19}, [%[f]]!\n"
"vmla.f32 q8, q10, d0[0]\n"
"vld1.f32 {d20-d21}, [%[f]]!\n"
"pld [%[f], #374]\n"
"vmov.f32 q0, q15\n"
"vld1.f32 {d30[0]}, [%[in]]!\n"
"vmla.f32 q1, q11, d0[0]\n"
"vld1.f32 {d22-d23}, [%[f]]!\n"
"vmla.f32 q2, q12, d0[0]\n"
"vld1.f32 {d24-d25}, [%[f]]!\n"
"vmla.f32 q3, q13, d0[0]\n"
"vld1.f32 {d26-d27}, [%[f]]!\n"
"vmla.f32 q4, q14, d0[0]\n"
"vld1.f32 {d28-d29}, [%[f]]!\n"
"vmla.f32 q5, q9, d0[0]\n"
"vld1.f32 {d18-d19}, [%[f]]!\n"
"vmla.f32 q6, q10, d0[0]\n"
"vld1.f32 {d20-d21}, [%[f]]!\n"
"vmla.f32 q7, q11, d0[0]\n"
"vld1.f32 {d22-d23}, [%[f]]!\n"
"vmla.f32 q8, q12, d0[0]\n"
"vld1.f32 {d24-d25}, [%[f]]!\n"
"sub r4, r4, #3\n"
"pld [%[f], #374]\n"
"vmov.f32 q0, q15\n"
"vld1.f32 {d30[0]}, [%[in]]!\n"
"vmla.f32 q1, q13, d0[0]\n"
"vld1.f32 {d26-d27}, [%[f]]!\n"
"vmla.f32 q2, q14, d0[0]\n"
"vld1.f32 {d28-d29}, [%[f]]!\n"
"vmla.f32 q3, q9, d0[0]\n"
"vld1.f32 {d18-d19}, [%[f]]!\n"
"vmla.f32 q4, q10, d0[0]\n"
"vld1.f32 {d20-d21}, [%[f]]!\n"
"vmla.f32 q5, q11, d0[0]\n"
"vld1.f32 {d22-d23}, [%[f]]!\n"
"vmla.f32 q6, q12, d0[0]\n"
"vld1.f32 {d24-d25}, [%[f]]!\n"
"vmla.f32 q7, q13, d0[0]\n"
"vld1.f32 {d26-d27}, [%[f]]!\n"
"vmla.f32 q8, q14, d0[0]\n"
"vld1.f32 {d28-d29}, [%[f]]!\n"
"vmov.f32 q0, q15\n"
"b 0b\n"
"3:\n"
"sub r4, r4, #1\n"
"vmla.f32 q1, q9, d0[0]\n"
"vld1.f32 {d18-d19}, [%[f]]!\n"
"vmla.f32 q2, q10, d0[0]\n"
"vld1.f32 {d20-d21}, [%[f]]!\n"
"vmla.f32 q3, q11, d0[0]\n"
"vmla.f32 q4, q12, d0[0]\n"
"vmla.f32 q5, q13, d0[0]\n"
"vmla.f32 q6, q14, d0[0]\n"
"vmla.f32 q7, q9, d0[0]\n"
"vmla.f32 q8, q10, d0[0]\n"
"1:\n"
"cmp r4, #0\n"
"beq 2f\n"
"sub r4, r4, #1\n"
"vld1.f32 {d0[0]}, [%[in]]!\n"
"vld1.f32 {d18-d19}, [%[f]]!\n"
"vld1.f32 {d20-d21}, [%[f]]!\n"
"vld1.f32 {d22-d23}, [%[f]]!\n"
"vld1.f32 {d24-d25}, [%[f]]!\n"
"vmla.f32 q1, q9, d0[0]\n"
"vld1.f32 {d18-d19}, [%[f]]!\n"
"vmla.f32 q2, q10, d0[0]\n"
"vld1.f32 {d20-d21}, [%[f]]!\n"
"vmla.f32 q3, q11, d0[0]\n"
"vld1.f32 {d22-d23}, [%[f]]!\n"
"vmla.f32 q4, q12, d0[0]\n"
"vld1.f32 {d24-d25}, [%[f]]!\n"
"vmla.f32 q5, q9, d0[0]\n"
"vmla.f32 q6, q10, d0[0]\n"
"vmla.f32 q7, q11, d0[0]\n"
"vmla.f32 q8, q12, d0[0]\n"
"b 1b\n"
"2:\n"
"vst1.f32 {d2-d3}, [r3]!\n"
"vst1.f32 {d4-d5}, [r3]!\n"
"vst1.f32 {d6-d7}, [r3]!\n"
"vst1.f32 {d8-d9}, [r3]!\n"
"vst1.f32 {d10-d11}, [r3]!\n"
"vst1.f32 {d12-d13}, [r3]!\n"
"vst1.f32 {d14-d15}, [r3]!\n"
"vst1.f32 {d16-d17}, [r3]\n"
: [f] "+r"(f), [in] "+r"(in)
: [k] "r"(fk), [out] "r"(out)
: "memory", "cc", "r2", "r3", "r4", "q0", "q1", "q2", "q3", "q4", "q5",
"q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
}
#endif
|
NonUniformScheme.h | /*
* NonUniformScheme.h
* CubismUP_3D
*
* Created by Fabian Wermelinger 05/08/2017
* Copyright 2017 ETH Zurich. All rights reserved.
*
*/
#ifndef CubismUP_3D_NonUniformScheme_h
#define CubismUP_3D_NonUniformScheme_h
#include "../Definitions.h"
#include <Cubism/BlockInfo.h>
#include <Cubism/MeshMap.h>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <vector>
CubismUP_3D_NAMESPACE_BEGIN
template <typename TBlock>
class NonUniformScheme
{
    // Widest stencil halo (in cells) supported on either side per direction.
    static constexpr size_t StencilMax = 3;
public:
    /**
     * Helper for non-uniform (stretched) Cartesian meshes of TBlock blocks.
     * Owns one 1D cubism::MeshMap per direction; after init() it can report
     * cell-width extrema and distribute finite-difference coefficients to
     * the blocks of a grid.
     */
    NonUniformScheme(
            const double xS, const double xE,
            const double yS, const double yE,
            const double zS, const double zE,
            const unsigned int nBlocksX, const unsigned int nBlocksY,
            const unsigned int nBlocksZ) :
        m_h_min{HUGE_VAL,HUGE_VAL,HUGE_VAL}, m_h_max{-1,-1,-1},
        m_initialized(false),
        m_map_x(xS,xE,nBlocksX),
        m_map_y(yS,yE,nBlocksY),
        m_map_z(zS,zE,nBlocksZ)
    {}
    ~NonUniformScheme() {}

    typedef cubism::MeshMap<TBlock> TMeshMap;

    // 1D array of cell spacings with _S halo entries in front and _E at the
    // back; operator()(i) accepts i in [-_S, ncells+_E).
    template <int _S, int _E>
    class HaloVector
    {
    public:
        static const int START = _S;
        static const int END = _E;
        inline double operator()(const int i) const { return m_data[i+_S]; }
        inline double& operator()(const int i) { return m_data[i+_S]; }
        // Actually frees the storage (swap idiom), unlike vector::clear().
        inline void clear() { std::vector<double>().swap(m_data); }
        // Assemble front halo + interior spacings + back halo.
        inline void fill(TMeshMap& mmap, const double* const halos)
        {
            // Fix: resize() pre-created ncells+_S+_E zeros and the inserts
            // then prepended a second full copy, leaving an unused zero tail
            // of equal size.  clear()+reserve() builds exactly ncells+_S+_E
            // elements with identical values at every reachable index, and
            // makes repeated fill() calls idempotent.
            m_data.clear();
            m_data.reserve(mmap.ncells() + _S + _E);
            m_data.insert(m_data.begin(), &halos[0], &halos[_S]);
            m_data.insert(m_data.begin()+_S, mmap.data_grid_spacing(), mmap.data_grid_spacing()+mmap.ncells());
            m_data.insert(m_data.begin()+_S+mmap.ncells(), &halos[_S], &halos[_S+_E]);
        }
    private:
        std::vector<double> m_data;
    };
    typedef HaloVector<StencilMax,StencilMax> TVector;

    // Initialize the three mesh maps from the given density kernels, build
    // the halo-padded spacing arrays, and record per-direction min/max cell
    // widths.  Must be called before any member that checks m_initialized.
    void init(const cubism::MeshDensity* const kernel_x,
              const cubism::MeshDensity* const kernel_y,
              const cubism::MeshDensity* const kernel_z)
    {
        double ghosts[2*StencilMax];
        m_map_x.init(kernel_x, StencilMax, StencilMax, &ghosts[0]);
        m_all_delta_x.fill(m_map_x, &ghosts[0]);
        m_map_y.init(kernel_y, StencilMax, StencilMax, &ghosts[0]);
        m_all_delta_y.fill(m_map_y, &ghosts[0]);
        m_map_z.init(kernel_z, StencilMax, StencilMax, &ghosts[0]);
        m_all_delta_z.fill(m_map_z, &ghosts[0]);
        for (size_t i = 0; i < m_map_x.ncells(); ++i) {
            m_h_max[0] = std::max( m_h_max[0], m_map_x.cell_width(i) );
            m_h_min[0] = std::min( m_h_min[0], m_map_x.cell_width(i) );
        }
        for (size_t i = 0; i < m_map_y.ncells(); ++i) {
            m_h_max[1] = std::max( m_h_max[1], m_map_y.cell_width(i) );
            m_h_min[1] = std::min( m_h_min[1], m_map_y.cell_width(i) );
        }
        for (size_t i = 0; i < m_map_z.ncells(); ++i) {
            m_h_max[2] = std::max( m_h_max[2], m_map_z.cell_width(i) );
            m_h_min[2] = std::min( m_h_min[2], m_map_z.cell_width(i) );
        }
        assert( m_h_max[0]>=m_h_min[0] );
        assert( m_h_max[1]>=m_h_min[1] );
        assert( m_h_max[2]>=m_h_min[2] );
        m_initialized = true;
    }

    // Compute the non-uniform FD coefficients of scheme TFD per direction and
    // copy the per-block slices into every block.  If cleanup is true the
    // spacing arrays are released afterwards -- only do this when setting up
    // the last scheme.
    template <typename TFD>
    void setup_coefficients(std::vector<cubism::BlockInfo>& infos,
                            const bool cleanup = false)
    {
        if (!m_initialized)
        {
            fprintf(stderr,"ERROR: NonUniformScheme: Not initialized.\n");
            fflush(0); exit(1);
        }
        // 0. the scheme's halo must fit into the allocated halos
        assert(TFD::HALO_S <= StencilMax);
        assert(TFD::HALO_E <= StencilMax);
        // 1. non-uniform finite differences along each direction
        TFD fd_x(m_map_x.ncells());
        TFD fd_y(m_map_y.ncells());
        TFD fd_z(m_map_z.ncells());
        fd_x.setup(&m_all_delta_x(0), m_map_x.ncells());
        fd_y.setup(&m_all_delta_y(0), m_map_y.ncells());
        fd_z.setup(&m_all_delta_z(0), m_map_z.ncells());
        typename TFD::template BlockSetFunctor<CUP_BLOCK_SIZE> set_x;
        typename TFD::template BlockSetFunctor<CUP_BLOCK_SIZE> set_y;
        typename TFD::template BlockSetFunctor<CUP_BLOCK_SIZE> set_z;
        // 2. distribute coefficients over blocks (blocks are independent)
        #pragma omp parallel for
        for(size_t i=0; i<infos.size(); ++i)
        {
            cubism::BlockInfo info = infos[i];
            TBlock& b = *(TBlock*)info.ptrBlock;
            {
                const int index = info.index[0];
                const unsigned int offset = TBlock::sizeX * index;
                set_x(fd_x, b.fd_cx, offset);
            }
            {
                const int index = info.index[1];
                const unsigned int offset = TBlock::sizeY * index;
                set_y(fd_y, b.fd_cy, offset);
            }
            {
                const int index = info.index[2];
                const unsigned int offset = TBlock::sizeZ * index;
                set_z(fd_z, b.fd_cz, offset);
            }
        }
        // 3. optional cleanup
        if (cleanup)
        {
            m_all_delta_x.clear();
            m_all_delta_y.clear();
            m_all_delta_z.clear();
        }
    }

    // TODO: [fabianw@mavt.ethz.ch; Mon Jan 22 2018 07:44:01 PM (+0100)] Is this
    // still needed?
    /*
    void setup_inverse_spacing(std::vector<cubism::BlockInfo>& infos)
    {
        if (!m_initialized)
        {
            fprintf(stderr,"ERROR: NonUniformScheme: Not initialized.\n");
            fflush(0); exit(1);
        }
#pragma omp parallel for
        for(int i=0; i<(int)infos.size(); ++i)
        {
            cubism::BlockInfo info = infos[i];
            TBlock& b = *(TBlock*)info.ptrBlock;
            const int indx = info.index[0];
            _set_block_invh<CUP_BLOCK_SIZE>(m_map_x.get_grid_spacing(indx), &b.invh_x[0]);
            const int indy = info.index[1];
            _set_block_invh<CUP_BLOCK_SIZE>(m_map_y.get_grid_spacing(indy), &b.invh_y[0]);
            const int indz = info.index[2]; // was declared "indy" twice; fixed to indz
            _set_block_invh<CUP_BLOCK_SIZE>(m_map_z.get_grid_spacing(indz), &b.invh_z[0]);
        }
    }
    */

    inline const TMeshMap& get_map_x() const { return m_map_x; }
    inline const TMeshMap& get_map_y() const { return m_map_y; }
    inline const TMeshMap& get_map_z() const { return m_map_z; }
    inline TMeshMap& get_map_x() { return m_map_x; }
    inline TMeshMap& get_map_y() { return m_map_y; }
    inline TMeshMap& get_map_z() { return m_map_z; }

    // Smallest cell width of direction i (0..2), or the global minimum for
    // i == -1.  Aborts if called before init().
    inline double minimum_cell_width(const int i=-1) const
    {
        assert(i < 3 && i > -2);
        if (!m_initialized)
        {
            fprintf(stderr, "ERROR: NonUniformScheme.h: minimum_cell_width() "
                    "can not return m_h_min, not initialized.\n");
            fflush(0); exit(1);
        }
        const double all_min = std::min({m_h_min[0], m_h_min[1], m_h_min[2]});
        if (-1 == i) return all_min;
        else return m_h_min[i];
    }

    // Largest cell width of direction i (0..2), or the global maximum for
    // i == -1.  Aborts if called before init().
    inline double maximum_cell_width(const int i=-1) const
    {
        assert(i < 3 && i > -2);
        if (!m_initialized)
        {
            fprintf(stderr, "ERROR: NonUniformScheme.h: maximum_cell_width() "
                    "can not return m_h_max, not initialized.\n");
            fflush(0); exit(1);
        }
        const double all_max = std::max({m_h_max[0], m_h_max[1], m_h_max[2]});
        if (-1 == i) return all_max;
        else return m_h_max[i];
    }

    // Print spacing and growth-factor statistics per direction.
    void print_mesh_statistics(const bool verb=true)
    {
        if (verb)
        {
            _compute_mesh_stats("x-direction", m_map_x.kernel_name(),
                    m_map_x.data_grid_spacing(), m_map_x.ncells() );
            _compute_mesh_stats("y-direction", m_map_y.kernel_name(),
                    m_map_y.data_grid_spacing(), m_map_y.ncells() );
            _compute_mesh_stats("z-direction", m_map_z.kernel_name(),
                    m_map_z.data_grid_spacing(), m_map_z.ncells() );
        }
    }

    // Harmonic mean of the three per-direction harmonic-mean spacings.
    double compute_mean_grid_spacing()
    {
        const double avgHx = _harmonicMean(m_map_x.data_grid_spacing(),
                m_map_x.ncells());
        const double avgHy = _harmonicMean(m_map_y.data_grid_spacing(),
                m_map_y.ncells());
        const double avgHz = _harmonicMean(m_map_z.data_grid_spacing(),
                m_map_z.ncells());
        return 3 / (1/avgHx + 1/avgHy + 1/avgHz);
    }

    // Harmonic mean of N values: N / sum(1/x_i).
    double _harmonicMean(const double* const data, const unsigned int N)
    {
        double hmean = 0;
        for (unsigned int i = 0; i < N; ++i) hmean += 1.0 / data[i];
        return N / hmean;
    }

private:
    double m_h_min[3], m_h_max[3]; // per-direction extreme cell widths
    bool m_initialized;            // set by init()
    TMeshMap m_map_x;
    TMeshMap m_map_y;
    TMeshMap m_map_z;
    TVector m_all_delta_x;         // halo-padded spacings (see HaloVector)
    TVector m_all_delta_y;
    TVector m_all_delta_z;

    // invh[i] = 1 / grid_spacing[i] for one block of size _BSIZE.
    template <int _BSIZE>
    void _set_block_invh(const double* const grid_spacing, Real* const invh)
    {
        for (int i = 0; i < _BSIZE; ++i)
            invh[i] = 1.0/grid_spacing[i];
    }

    // Online (Welford-style) moment statistics of the cell spacings and of
    // the cell-to-cell growth factors.  NOTE(review): assumes N >= 2 (the
    // variance divides by N-1).  The growth-factor pass reuses i (starting
    // at 1) as the online sample count, which slightly biases those moments;
    // print-only diagnostics, deliberately left as-is.
    void _compute_mesh_stats(const std::string header, const std::string name,
                             const double* const data, const unsigned int N)
    {
        const auto deltaM2 = [](double i, double delta) {
            return std::pow(delta, 2) * i / (i+1.0);
        };
        const auto deltaM3 = [](double i, double delta, double M2) {
            const double norm = i * (i-1.0) / std::pow(i+1.0, 2);
            const double corr = 3*delta * M2 / (i+1.0);
            return std::pow(delta, 3) * norm - corr;
        };
        const auto deltaM4 = [](double i, double delta, double M2, double M3) {
            const double norm = i * (i*i - i + 1.0) / std::pow(i+1.0, 3);
            const double cor1 = 6 * std::pow(delta, 2) * M2 / std::pow(i+1.0, 2);
            const double cor2 = 4 * delta * M3 / (i+1.0);
            return std::pow(delta, 4) * norm + cor1 - cor2;
        };
        printf("%s statistics %s.\n", name.c_str(), header.c_str());
        {
            double mean = 0, var = 0, skew = 0, kurt = 0;
            double min = HUGE_VAL, max = -HUGE_VAL;
            for (unsigned int i = 0; i < N; ++i) {
                if (data[i] < min) min = data[i];
                if (data[i] > max) max = data[i];
            }
            double M2 = 0, M3 = 0, M4 = 0;
            for (unsigned int i = 0; i < N; ++i) {
                const double delta = data[i] - mean;
                const double dM2 = deltaM2(i, delta);
                const double dM3 = deltaM3(i, delta, M2);
                const double dM4 = deltaM4(i, delta, M2, M3);
                mean += delta / (i+1.0);
                M4 += dM4;
                M3 += dM3;
                M2 += dM2;
            }
            var = M2 / (N - 1);
            skew = std::sqrt(N) * M3 / std::pow(M2 + 2e-16, 1.5);
            kurt = N * M4 / (M2 * M2 + 2e-16) - 3;
            printf("\tMesh spacing: mean=%e; std=%e; skew=%e; kurt=%e; min=%e; max=%e\n",
                    mean, std::sqrt(var), skew, kurt, min, max);
        }
        {
            double mean = 0, var = 0, skew = 0, kurt = 0;
            double min = HUGE_VAL, max = -HUGE_VAL;
            for (unsigned int i = 1; i < N; ++i) {
                const double r = data[i]/data[i-1];
                if (r < min) min = r;
                if (r > max) max = r;
            }
            double M2 = 0, M3 = 0, M4 = 0;
            for (unsigned int i = 1; i < N; ++i) {
                const double r = data[i]/data[i-1];
                const double delta = r - mean;
                const double dM2 = deltaM2(i, delta);
                const double dM3 = deltaM3(i, delta, M2);
                const double dM4 = deltaM4(i, delta, M2, M3);
                mean += delta / (i+1.0);
                M4 += dM4;
                M3 += dM3;
                M2 += dM2;
            }
            var = M2 / (N - 1);
            skew = std::sqrt(N) * M3 / std::pow(M2 + 2e-16, 1.5);
            kurt = N * M4 / (M2 * M2 + 2e-16) - 3;
            printf("\tGrowth factor: mean=%e; std=%e; skew=%e; kurt=%e; min=%e; max=%e\n",
                    mean, std::sqrt(var), skew, kurt, min, max);
        }
    }
};
CubismUP_3D_NAMESPACE_END
#endif // NonUniformScheme
|
callback.h | #ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#define _DEFAULT_SOURCE
#include <stdio.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <omp.h>
#include <omp-tools.h>
#include "ompt-signal.h"
// Used to detect architecture
#include "../../src/kmp_platform.h"
// Printable names for ompt_thread_t, indexed by enum value (index 0 unused).
static const char* ompt_thread_t_values[] = {
NULL,
"ompt_thread_initial",
"ompt_thread_worker",
"ompt_thread_other"
};
// Printable names for ompt_task_status_t, indexed by enum value.
static const char* ompt_task_status_t_values[] = {
NULL,
"ompt_task_complete", // 1
"ompt_task_yield", // 2
"ompt_task_cancel", // 3
"ompt_task_detach", // 4
"ompt_task_early_fulfill", // 5
"ompt_task_late_fulfill", // 6
"ompt_task_switch" // 7
};
// Printable names for the ompt_cancel_flag_t bit positions.
static const char* ompt_cancel_flag_t_values[] = {
"ompt_cancel_parallel",
"ompt_cancel_sections",
"ompt_cancel_loop",
"ompt_cancel_taskgroup",
"ompt_cancel_activated",
"ompt_cancel_detected",
"ompt_cancel_discarded_task"
};
// Render the ompt task-type bitmask into buffer as a concatenation of flag
// names.  The table mirrors the original if-chain exactly: the four base
// kinds carry no '|' separator, the modifier flags are prefixed with '|'.
// Note: buffer is left untouched when no known flag is set.
static void format_task_type(int type, char *buffer) {
  static const struct {
    int flag;
    const char *name;
  } task_flags[] = {
      {ompt_task_initial, "ompt_task_initial"},
      {ompt_task_implicit, "ompt_task_implicit"},
      {ompt_task_explicit, "ompt_task_explicit"},
      {ompt_task_target, "ompt_task_target"},
      {ompt_task_undeferred, "|ompt_task_undeferred"},
      {ompt_task_untied, "|ompt_task_untied"},
      {ompt_task_final, "|ompt_task_final"},
      {ompt_task_mergeable, "|ompt_task_mergeable"},
      {ompt_task_merged, "|ompt_task_merged"}};
  char *cursor = buffer;
  int i;
  for (i = 0; i < (int)(sizeof(task_flags) / sizeof(task_flags[0])); i++) {
    if (type & task_flags[i].flag)
      cursor += sprintf(cursor, "%s", task_flags[i].name);
  }
}
// OMPT runtime inquiry entry points.  Presumably resolved through the
// lookup function passed to the tool's ompt_initialize (not visible in this
// chunk) -- they must be set before any of the callbacks below fire.
static ompt_set_callback_t ompt_set_callback;
static ompt_get_callback_t ompt_get_callback;
static ompt_get_state_t ompt_get_state;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_task_memory_t ompt_get_task_memory;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_finalize_tool_t ompt_finalize_tool;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;
// Query task info at ancestor level `level` and print parallel/task ids,
// frame pointers and the decoded task type for the current thread.
static void print_ids(int level)
{
int task_type, thread_num;
ompt_frame_t *frame;
ompt_data_t *task_parallel_data;
ompt_data_t *task_data;
int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame,
&task_parallel_data, &thread_num);
char buffer[2048];
// NOTE(review): format_task_type leaves buffer untouched when task_type
// has no known flag set, so buffer could be printed uninitialized below --
// in practice every task carries at least one base type flag; confirm.
format_task_type(task_type, buffer);
if (frame)
printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, "
"task_type=%s=%d, thread_num=%d\n",
ompt_get_thread_data()->value, level,
exists_task ? task_parallel_data->value : 0,
exists_task ? task_data->value : 0, frame->exit_frame.ptr,
frame->enter_frame.ptr, buffer, task_type, thread_num);
}
// Frame-address helpers used by the tests to validate exit/enter frames.
#define get_frame_address(level) __builtin_frame_address(level)
#define print_frame(level) \
printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \
ompt_get_thread_data()->value, level, get_frame_address(level))
// clang (version 5.0 and above) adds an intermediate function call with debug flag (-g)
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif
#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif
// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
{} \
__asm__("nop"); \
ompt_label_##id:
// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)
// This macro prints the exact address that a previously called runtime function
// returns to.
#define print_current_address(id) \
define_ompt_label(id) \
print_possible_return_addresses(get_ompt_label_address(id))
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts a second NOP instruction (another 4 bytes). For non-void runtime
// functions Clang inserts a STW instruction (but only if compiling under
// -fno-PIC which will be the default with Clang 8.0, another 4 bytes).
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
((char *)addr) - 8, ((char *)addr) - 12)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
((char *)addr) - 4, ((char *)addr) - 8)
#elif KMP_ARCH_RISCV64
#if __riscv_compressed
// On RV64GC the C.NOP instruction is 2 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the C.NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p\n", \
ompt_get_thread_data()->value, ((char *)addr) - 6, ((char *)addr) - 10)
#else
// On RV64G the NOP instruction is 4 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p\n", \
ompt_get_thread_data()->value, ((char *)addr) - 8, ((char *)addr) - 12)
#endif
#else
#error Unsupported target architecture, cannot determine address offset!
#endif
// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
define_ompt_label(id) \
print_fuzzy_address_blocks(get_ompt_label_address(id))
// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
#define print_fuzzy_address_blocks(addr) \
printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 \
" or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \
ompt_get_thread_data()->value, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr)
// Register the on_<name> handler for callback <name>; warns (rather than
// aborts) when the runtime refuses the registration.
#define register_callback_t(name, type) \
do { \
type f_##name = &on_##name; \
if (ompt_set_callback(name, (ompt_callback_t)f_##name) == ompt_set_never) \
printf("0: Could not register callback '" #name "'\n"); \
} while (0)
#define register_callback(name) register_callback_t(name, name##_t)
#ifndef USE_PRIVATE_TOOL
// Mutex-acquire callback: logs that a thread is about to wait for the given
// mutex kind (lock, nest lock, critical, atomic, ordered). Other kinds are
// silently ignored. Output lines are matched verbatim by the test patterns.
static void
on_ompt_callback_mutex_acquire(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      break;
  }
}
// Mutex-acquired callback: logs that the wait ended and the mutex was
// obtained. For nest locks this reports only the *first* acquisition
// (subsequent ones arrive via on_ompt_callback_nest_lock).
static void
on_ompt_callback_mutex_acquired(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}
// Mutex-released callback: logs the release of the mutex. For nest locks this
// reports only the *last* (outermost) release; inner releases arrive via
// on_ompt_callback_nest_lock.
static void
on_ompt_callback_mutex_released(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}
// Nest-lock callback for re-entries beyond the first acquisition:
// scope_begin = the lock was acquired again by its owner,
// scope_end   = one nesting level was released (but not the last).
static void
on_ompt_callback_nest_lock(
  ompt_scope_endpoint_t endpoint,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  uint64_t tid = ompt_get_thread_data()->value;
  if (endpoint == ompt_scope_begin) {
    printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", tid, wait_id, codeptr_ra);
  } else if (endpoint == ompt_scope_end) {
    printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", tid, wait_id, codeptr_ra);
  }
}
// Sync-region callback: logs begin/end of barriers (all four barrier kinds
// share one message), taskwait and taskgroup regions. Reduction kinds must be
// routed to on_ompt_callback_reduction instead, so seeing one here aborts.
// At region end parallel_data may be NULL, hence the (parallel_data)?...:0.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          // print_ids() is a helper defined elsewhere in this tool.
          print_ids(0);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
  }
}
// Sync-region *wait* callback: same structure as on_ompt_callback_sync_region
// but reports the wait interval inside the region ("wait_..." messages).
// Reduction kinds must never reach this callback; seeing one aborts.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      // parallel_data may be NULL at region end, hence the NULL guard below.
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
  }
}
// Reduction callback: logs begin/end of a reduction. parallel_data may be
// NULL, in which case 0 is printed for the parallel id. Endpoints other than
// begin/end are ignored, matching the original switch without default.
static void on_ompt_callback_reduction(ompt_sync_region_t kind,
                                       ompt_scope_endpoint_t endpoint,
                                       ompt_data_t *parallel_data,
                                       ompt_data_t *task_data,
                                       const void *codeptr_ra) {
  uint64_t tid = ompt_get_thread_data()->value;
  uint64_t pid = parallel_data ? parallel_data->value : 0;
  if (endpoint == ompt_scope_begin) {
    printf("%" PRIu64 ": ompt_event_reduction_begin: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
           tid, pid, task_data->value, codeptr_ra);
  } else if (endpoint == ompt_scope_end) {
    printf("%" PRIu64 ": ompt_event_reduction_end: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
           tid, pid, task_data->value, codeptr_ra);
  }
}
// Flush callback: logs the flush event for the given thread.
static void on_ompt_callback_flush(ompt_data_t *thread_data,
                                   const void *codeptr_ra) {
  printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n", thread_data->value,
         codeptr_ra);
}
// Cancellation callback: decodes `flags` into a "what was cancelled" string
// (parallel/sections/loop/taskgroup) and a "how" string
// (activated/detected/discarded_task) from ompt_cancel_flag_t_values, then
// logs both together with the raw flag word.
//
// FIX: the two string pointers were previously uninitialized; if `flags`
// contained none of the tested bits in either group, printf read an
// indeterminate pointer (undefined behavior, CERT EXP33-C). They now default
// to a visible placeholder.
static void
on_ompt_callback_cancel(
  ompt_data_t *task_data,
  int flags,
  const void *codeptr_ra)
{
  const char* first_flag_value = "unknown";
  const char* second_flag_value = "unknown";
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_loop)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];
  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];
  printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra);
}
// Implicit-task callback: assigns a fresh unique id to each implicit task at
// scope_begin and logs begin/end. The *initial* task (flags & ompt_task_initial)
// is special-cased: there is no parallel_begin callback for the implicit
// parallel region, so its parallel_data id is created here.
static void
on_ompt_callback_implicit_task(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  unsigned int team_size,
  unsigned int thread_num,
  int flags)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      if(task_data->ptr)
        printf("%s\n", "0: task_data initially not null");
      task_data->value = ompt_get_unique_id();
      //there is no parallel_begin callback for implicit parallel region
      //thus it is initialized in initial task
      if(flags & ompt_task_initial)
      {
        char buffer[2048];
        format_task_type(flags, buffer);
        // Only check initial task not created by teams construct
        if (team_size == 1 && thread_num == 1 && parallel_data->ptr)
          printf("%s\n", "0: parallel_data initially not null");
        parallel_data->value = ompt_get_unique_id();
        printf("%" PRIu64 ": ompt_event_initial_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32 ", index=%" PRIu32 ", flags=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num, flags);
      } else {
        printf("%" PRIu64 ": ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num);
      }
      break;
    case ompt_scope_end:
      // parallel_data may be NULL at end; print 0 in that case.
      if(flags & ompt_task_initial){
        printf("%" PRIu64 ": ompt_event_initial_task_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32
               ", index=%" PRIu32 "\n",
               ompt_get_thread_data()->value,
               (parallel_data) ? parallel_data->value : 0, task_data->value,
               team_size, thread_num);
      } else {
        printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, team_size, thread_num);
      }
      break;
  }
}
// Lock-init callback: logs creation of a simple or nested lock together with
// the hint/implementation values the runtime chose; other mutex kinds are
// ignored.
static void
on_ompt_callback_lock_init(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      break;
  }
}
// Lock-destroy callback: logs destruction of a simple or nested lock; all
// other mutex kinds are ignored (matches the original switch/default).
static void
on_ompt_callback_lock_destroy(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  uint64_t tid = ompt_get_thread_data()->value;
  if (kind == ompt_mutex_lock) {
    printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", tid, wait_id, codeptr_ra);
  } else if (kind == ompt_mutex_nest_lock) {
    printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", tid, wait_id, codeptr_ra);
  }
}
// Worksharing callback: logs begin/end of loop, sections, single
// (executor/other), distribute and taskloop regions. The workshare kind is
// intentionally not reported yet (see the //impl markers).
static void
on_ompt_callback_work(
  ompt_work_t wstype,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  uint64_t count,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_end:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
      }
      break;
  }
}
// Master-region callback: logs entry/exit of a master construct.
static void
on_ompt_callback_master(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  uint64_t tid = ompt_get_thread_data()->value;
  if (endpoint == ompt_scope_begin) {
    printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", tid, parallel_data->value, task_data->value, codeptr_ra);
  } else if (endpoint == ompt_scope_end) {
    printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", tid, parallel_data->value, task_data->value, codeptr_ra);
  }
}
// Parallel-begin callback: assigns a fresh unique id to the new parallel
// region and logs the parent task's frame pointers. The low nibble of `flag`
// encodes the invoker; ompt_parallel_team distinguishes a parallel construct
// from a teams construct (changes the event name and size label).
static void on_ompt_callback_parallel_begin(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame, ompt_data_t *parallel_data,
    uint32_t requested_team_size, int flag, const void *codeptr_ra) {
  if(parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  int invoker = flag & 0xF;
  const char *event = (flag & ompt_parallel_team) ? "parallel" : "teams";
  const char *size = (flag & ompt_parallel_team) ? "team_size" : "num_teams";
  printf("%" PRIu64 ": ompt_event_%s_begin: parent_task_id=%" PRIu64
         ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
         "parallel_id=%" PRIu64 ", requested_%s=%" PRIu32
         ", codeptr_ra=%p, invoker=%d\n",
         ompt_get_thread_data()->value, event, encountering_task_data->value,
         encountering_task_frame->exit_frame.ptr,
         encountering_task_frame->enter_frame.ptr, parallel_data->value, size,
         requested_team_size, codeptr_ra, invoker);
}
// Parallel-end callback: logs the end of a parallel or teams region; the low
// nibble of `flag` carries the invoker code.
static void on_ompt_callback_parallel_end(ompt_data_t *parallel_data,
                                          ompt_data_t *encountering_task_data,
                                          int flag, const void *codeptr_ra) {
  const char *region_kind = (flag & ompt_parallel_team) ? "parallel" : "teams";
  const int how_invoked = flag & 0xF;
  printf("%" PRIu64 ": ompt_event_%s_end: parallel_id=%" PRIu64
         ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n",
         ompt_get_thread_data()->value, region_kind, parallel_data->value,
         encountering_task_data->value, how_invoked, codeptr_ra);
}
// Task-create callback: assigns a fresh unique id to the new task and logs
// parent id, parent frame pointers, decoded task type (via format_task_type,
// defined elsewhere in this tool) and dependence flag. Parent data/frame may
// be NULL (e.g. for the very first task), hence the guards in the printf.
static void
on_ompt_callback_task_create(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame,
    ompt_data_t* new_task_data,
    int type,
    int has_dependences,
    const void *codeptr_ra)
{
  if(new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  char buffer[2048];
  format_task_type(type, buffer);
  printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame.ptr : NULL, encountering_task_frame ? encountering_task_frame->enter_frame.ptr : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no");
}
// Task-schedule callback: logs the task switch. If there is no second task,
// -1 is printed through %PRIu64, i.e. it appears as 2^64-1. A prior status of
// complete or late_fulfill additionally emits the task_end event.
static void
on_ompt_callback_task_schedule(
    ompt_data_t *first_task_data,
    ompt_task_status_t prior_task_status,
    ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n",
         ompt_get_thread_data()->value, first_task_data->value,
         (second_task_data ? second_task_data->value : -1),
         ompt_task_status_t_values[prior_task_status], prior_task_status);
  if (prior_task_status == ompt_task_complete ||
      prior_task_status == ompt_task_late_fulfill) {
    printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value);
  }
}
// Dependences callback: logs the dependence array attached to a task.
static void on_ompt_callback_dependences(ompt_data_t *task_data,
                                         const ompt_dependence_t *deps,
                                         int ndeps) {
  printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64
         ", deps=%p, ndeps=%d\n",
         ompt_get_thread_data()->value, task_data->value, (void *)deps, ndeps);
}
// Task-dependence callback: logs one dependence pair between two tasks.
static void on_ompt_callback_task_dependence(ompt_data_t *first_task_data,
                                             ompt_data_t *second_task_data) {
  printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, first_task_data->value,
         second_task_data->value);
}
// Thread-begin callback: assigns a fresh unique id to the new thread and logs
// its type (decoded via the ompt_thread_t_values table).
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_t_values[thread_type], thread_type, thread_data->value);
}
// Thread-end callback: logs the id of the terminating thread.
static void on_ompt_callback_thread_end(ompt_data_t *thread_data) {
  printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, thread_data->value);
}
// Control-tool callback (omp_control_tool): logs command/modifier/arg and the
// current task's frame pointers, obtained via ompt_get_task_info at depth 0.
// Returns 0 to signal success to the runtime.
static int
on_ompt_callback_control_tool(
  uint64_t command,
  uint64_t modifier,
  void *arg,
  const void *codeptr_ra)
{
  ompt_frame_t* omptTaskFrame;
  ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
  printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame.ptr, omptTaskFrame->enter_frame.ptr);
  return 0; //success
}
// Tool initializer, invoked by the runtime after ompt_start_tool succeeds.
// First resolves all OMPT entry points through the provided lookup function,
// then registers every callback this tool implements (register_callback*
// prints a diagnostic if registration fails). Returns 1 to keep the tool
// active.
int ompt_initialize(
  ompt_function_lookup_t lookup,
  int initial_device_num,
  ompt_data_t *tool_data)
{
  // Resolve runtime entry points.
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback");
  ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state");
  ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
  ompt_get_task_memory = (ompt_get_task_memory_t)lookup("ompt_get_task_memory");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
  ompt_finalize_tool = (ompt_finalize_tool_t)lookup("ompt_finalize_tool");
  ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
  ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
  ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
  ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
  ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
  ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
  ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
  ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");
  // Register callbacks; the _t variants are for events sharing a common
  // callback signature (e.g. all mutex events use ompt_callback_mutex_t).
  register_callback(ompt_callback_mutex_acquire);
  register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
  register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
  register_callback(ompt_callback_nest_lock);
  register_callback(ompt_callback_sync_region);
  register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_callback_t(ompt_callback_reduction, ompt_callback_sync_region_t);
  register_callback(ompt_callback_control_tool);
  register_callback(ompt_callback_flush);
  register_callback(ompt_callback_cancel);
  register_callback(ompt_callback_implicit_task);
  register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
  register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
  register_callback(ompt_callback_work);
  register_callback(ompt_callback_master);
  register_callback(ompt_callback_parallel_begin);
  register_callback(ompt_callback_parallel_end);
  register_callback(ompt_callback_task_create);
  register_callback(ompt_callback_task_schedule);
  register_callback(ompt_callback_dependences);
  register_callback(ompt_callback_task_dependence);
  register_callback(ompt_callback_thread_begin);
  register_callback(ompt_callback_thread_end);
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}
// Tool finalizer: announce runtime shutdown (line matched by test patterns).
void ompt_finalize(ompt_data_t *tool_data) {
  fputs("0: ompt_event_runtime_shutdown\n", stdout);
}
#ifdef __cplusplus
extern "C" {
#endif
// OMPT entry point looked up by the runtime at startup. Returns a static
// descriptor naming our initialize/finalize hooks; tool_data starts as 0.
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
  return &ompt_start_tool_result;
}
#ifdef __cplusplus
}
#endif
#endif // ifndef USE_PRIVATE_TOOL
|
mandel-omp.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
 *        windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Return the current wall-clock time in microseconds as a double
 * (seconds * 1e6 + microseconds from gettimeofday). */
double getusec_() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return (double)now.tv_sec * 1e6 + (double)now.tv_usec;
}
/* Timing helpers: both expect a `double stamp;` in the enclosing scope.
 * START_COUNT_TIME records the current time (microseconds) in `stamp`;
 * STOP_COUNT_TIME(_m) converts the elapsed interval to seconds and prints
 * it with the label _m. */
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
        stamp = stamp/1e6;\
        printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
// NOTE(review): file-scope loop indices used by mandelbrot(); they are shared
// state across the OpenMP tasks created there — confirm this is intended.
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
  double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/*
 * Compute, for every pixel of a height x width grid covering the rectangle
 * that starts at (real_min, imag_min) with per-pixel steps scale_real and
 * scale_imag, the Mandelbrot escape-iteration count (z <- z^2 + c until
 * |z|^2 >= N*N or maxiter iterations). Depending on _DISPLAY_ the result is
 * either drawn into an X11 window or stored into output[row][col].
 *
 * FIX: the original loops iterated over the file-scope globals `row` and
 * `col`; every OpenMP task created below then shared (and raced on) those
 * globals, so rows could be skipped or computed twice. The indices are now
 * block-local (the row index is firstprivate in each task), which removes
 * the data race while producing the identical sequential result. The local
 * file's `complex` typedef is no longer needed internally.
 */
void mandelbrot(int height,
        int width,
        double real_min,
        double imag_min,
        double scale_real,
        double scale_imag,
        int maxiter,
#if _DISPLAY_
        int setup_return,
        Display *display,
        Window win,
        GC gc,
        double scale_color,
        double min_color)
#else
        int ** output)
#endif
{
#ifndef N
#define N 2 /* fallback: size of problem space, normally defined above */
#endif
    /* Calculate points and save/display */
    for (int r = 0; r < height; ++r) {
#pragma omp task firstprivate(r)
        for (int c_idx = 0; c_idx < width; ++c_idx) {
            /* z starts at the origin; c is this pixel scaled to the region.
             * height-1-r so the y axis displays with larger values at top. */
            double z_real = 0.0, z_imag = 0.0;
            double c_real = real_min + ((double) c_idx * scale_real);
            double c_imag = imag_min + ((double) (height - 1 - r) * scale_imag);
            /* Iterate z0, z1, ... until divergence or maximum iterations. */
            int k = 0;
            double lengthsq, temp;
            do {
                temp = z_real * z_real - z_imag * z_imag + c_real;
                z_imag = 2 * z_real * z_imag + c_imag;
                z_real = temp;
                lengthsq = z_real * z_real + z_imag * z_imag;
                ++k;
            } while (lengthsq < (N * N) && k < maxiter);
#if _DISPLAY_
            /* Scale color and display point */
            long color = (long) ((k - 1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                XSetForeground (display, gc, color);
                XDrawPoint (display, win, gc, c_idx, r);
            }
#else
            output[r][c_idx] = k;
#endif
        }
    }
}
/*
 * Entry point of the Mandelbrot demo.
 *
 * Parses the command line (-i maxiter, -w size, -s square size, -c center,
 * and -o for file output in the non-display build), computes the set via
 * mandelbrot(), and either displays it (X11 build) or optionally writes the
 * iteration counts to "mandel.out".
 *
 * Fixes over the original:
 *  - every option that consumes values verifies they exist (previously
 *    "-i" as the last argument read argv[argc], i.e. out of bounds);
 *  - malloc() results are checked before use;
 *  - fwrite()'s size_t return is compared against (size_t)width (no
 *    signed/unsigned mismatch);
 *  - the output file is fclose()d (flushes buffered data) and the image
 *    buffers are freed;
 *  - main() returns EXIT_SUCCESS explicitly on every path.
 */
int main(int argc, char *argv[]) {
  int maxiter = 1000;               /* maximum iterations per point */
  double real_min;                  /* bounds of the computed region */
  double real_max;
  double imag_min;
  double imag_max;
  int width = NPIXELS;              /* dimensions of display window / image */
  int height = NPIXELS;
  double size = N, x0 = 0, y0 = 0;  /* half-size and center of the square */
#if _DISPLAY_
  Display *display;
  Window win;
  GC gc;
  int setup_return;
  long min_color = 0, max_color = 0;
  double scale_color;
#else
  int **output;                     /* per-row iteration counts */
  FILE *fp = NULL;                  /* optional output file (-o) */
#endif
  double scale_real, scale_imag;

  /* Process command-line arguments. Options that take values only match
     when the value(s) are actually present; otherwise they fall through
     to the usage message. */
  for (int i = 1; i < argc; i++) {
    if (strcmp(argv[i], "-i") == 0 && i + 1 < argc) {
      maxiter = atoi(argv[++i]);
    }
    else if (strcmp(argv[i], "-w") == 0 && i + 1 < argc) {
      width = atoi(argv[++i]);
      height = width;
    }
    else if (strcmp(argv[i], "-s") == 0 && i + 1 < argc) {
      size = atof(argv[++i]);
    }
#if !_DISPLAY_
    else if (strcmp(argv[i], "-o") == 0) {
      if ((fp = fopen("mandel.out", "wb")) == NULL) {
        fprintf(stderr, "Unable to open file\n");
        return EXIT_FAILURE;
      }
    }
#endif
    else if (strcmp(argv[i], "-c") == 0 && i + 2 < argc) {
      x0 = atof(argv[++i]);
      y0 = atof(argv[++i]);
    }
    else {
#if _DISPLAY_
      fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
      fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
      fprintf(stderr, "       -o to write computed image to disk (default no file generated)\n");
#endif
      fprintf(stderr, "       -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
      fprintf(stderr, "       -w to specify the size of the display window (default 800x800 pixels)\n");
#else
      fprintf(stderr, "       -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
      fprintf(stderr, "       -c to specify the center x0+iy0 of the square to compute (default origin)\n");
      fprintf(stderr, "       -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
      return EXIT_FAILURE;
    }
  }

  /* Reject nonsensical numeric arguments early (atoi reports 0 on garbage). */
  if (width <= 0 || height <= 0 || maxiter <= 0) {
    fprintf(stderr, "Window size and iteration count must be positive\n");
    return EXIT_FAILURE;
  }

  real_min = x0 - size;
  real_max = x0 + size;
  imag_min = y0 - size;
  imag_max = y0 + size;

  /* Produce text output */
  fprintf(stdout, "\n");
  fprintf(stdout, "Mandelbrot program\n");
  fprintf(stdout, "center = (%g, %g), size = %g\n",
          (real_max + real_min)/2, (imag_max + imag_min)/2,
          (real_max - real_min)/2);
  fprintf(stdout, "maximum iterations = %d\n", maxiter);
  fprintf(stdout, "\n");

#if _DISPLAY_
  /* Initialize for graphical display */
  setup_return =
    setup(width, height, &display, &win, &gc, &min_color, &max_color);
  if (setup_return != EXIT_SUCCESS) {
    fprintf(stderr, "Unable to initialize display, continuing\n");
    return EXIT_FAILURE;
  }
#else
  /* Allocate the output image, aborting cleanly on allocation failure
     (the original dereferenced unchecked malloc() results). */
  output = malloc(height * sizeof(int *));
  if (output == NULL) {
    fprintf(stderr, "Out of memory\n");
    return EXIT_FAILURE;
  }
  for (int row = 0; row < height; ++row) {
    output[row] = malloc(width * sizeof(int));
    if (output[row] == NULL) {
      fprintf(stderr, "Out of memory\n");
      for (int r = 0; r < row; ++r)
        free(output[r]);
      free(output);
      return EXIT_FAILURE;
    }
  }
#endif

  /* Compute factors to scale computational region to window */
  scale_real = (double) (real_max - real_min) / (double) width;
  scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
  /* Compute factor for color scaling */
  scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

  /* Start timing (stamp is used by the timing macros) */
  double stamp;
  START_COUNT_TIME;
#if _DISPLAY_
  mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag, maxiter,
             setup_return, display, win, gc, scale_color, min_color);
#else
  mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag, maxiter,
             output);
#endif
  /* End timing */
  STOP_COUNT_TIME("Total execution time");

#if _DISPLAY_
  /* Be sure all output is written */
  if (setup_return == EXIT_SUCCESS) {
    XFlush (display);
  }
  /* Wait for user response, then exit program */
  if (setup_return == EXIT_SUCCESS) {
    interact(display, &win, width, height,
             real_min, real_max, imag_min, imag_max);
  }
#else
  if (fp != NULL) {
    for (int row = 0; row < height; ++row)
      if (fwrite(output[row], sizeof(int), width, fp) != (size_t)width) {
        fprintf(stderr, "Output file not written correctly\n");
      }
    /* fclose flushes buffered data; a failure here means the file is bad. */
    if (fclose(fp) != 0)
      fprintf(stderr, "Output file not written correctly\n");
  }
  for (int row = 0; row < height; ++row)
    free(output[row]);
  free(output);
#endif
  return EXIT_SUCCESS;
}
|
// File: blaze/math/smp/openmp/DenseVector.h
//=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseVector.h
// \brief Header file for the OpenMP-based dense vector SMP implementation
//
// Copyright (C) 2012-2017 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseVector.h>
#include <blaze/math/expressions/SparseVector.h>
#include <blaze/math/functors/AddAssign.h>
#include <blaze/math/functors/Assign.h>
#include <blaze/math/functors/DivAssign.h>
#include <blaze/math/functors/MultAssign.h>
#include <blaze/math/functors/SubAssign.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/typetraits/IsDenseVector.h>
#include <blaze/math/typetraits/IsSIMDCombinable.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Subvector.h>
#include <blaze/system/SMP.h>
#include <blaze/util/algorithms/Min.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
namespace blaze {
//=================================================================================================
//
// OPENMP-BASED ASSIGNMENT KERNELS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side dense vector
        , bool TF2       // Transpose flag of the right-hand side dense vector
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   // This kernel must run inside an active OpenMP parallel region: the
   // '#pragma omp for' below is an orphaned worksharing construct that
   // distributes iterations over the team created by the caller.
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   using ET1 = ElementType_<VT1>;
   using ET2 = ElementType_<VT2>;

   // SIMD kernels are usable only if both vector types are vectorizable and
   // their element types can be combined in a single SIMD operation.
   constexpr bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
   constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<VT1> >::size );

   // Note: '~' is Blaze's CRTP downcast to the concrete vector type.
   const bool lhsAligned( (~lhs).isAligned() );
   const bool rhsAligned( (~rhs).isAligned() );

   // Partition the target into one contiguous chunk per thread; 'addon'
   // rounds the chunk size up so all elements are covered.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   // Pad each chunk to a multiple of SIMDSIZE so chunk boundaries stay
   // SIMD-aligned (SIMDSIZE is a power of two, hence the bitmask remainder).
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

#pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Chunks starting past the end (possible due to the SIMD padding and
      // the rounded-up share) have no work.
      if( index >= (~lhs).size() )
         continue;

      // Clamp the final chunk to the vector size.
      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Dispatch to the fastest subvector views the runtime alignment allows.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         auto target( subvector<aligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && lhsAligned ) {
         auto target( subvector<aligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && rhsAligned ) {
         auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
      else {
         auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side sparse vector
        , bool TF2       // Transpose flag of the right-hand side sparse vector
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   // Must run inside an active OpenMP parallel region: the '#pragma omp for'
   // below is an orphaned worksharing construct relying on the team created
   // by the caller.
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   // Split the target into one contiguous chunk per thread, rounding the
   // share up so all elements are covered ('~' is the CRTP downcast).
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t sizePerThread( (~lhs).size() / threads + addon );

#pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Threads whose chunk starts past the end of the vector have no work.
      if( index >= (~lhs).size() )
         continue;

      // Clamp the final chunk. Sparse operands give no alignment guarantee,
      // hence unaligned subvector views throughout (no SIMD dispatch here).
      const size_t size( min( sizePerThread, (~lhs).size() - index ) );
      auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) );
      const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) );
      op( target, source );
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable (SFINAE selected this
   // overload), so no parallel evaluation is possible: forward directly to
   // the serial assignment kernel ('~' is the CRTP downcast).
   assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \return void
//
// This function performs the OpenMP-based SMP assignment to a dense vector. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Parallel execution requires element types that are themselves serial.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Spawn an OpenMP team only when no serial section is active and the
      // right-hand side expression supports parallel evaluation; otherwise
      // fall back to the serial assignment kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, Assign() );
      }
      else {
         assign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable (SFINAE selected this
   // overload), so forward directly to the serial addition-assignment kernel.
   addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Parallel execution requires element types that are themselves serial.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Spawn an OpenMP team only when no serial section is active and the
      // right-hand side expression supports parallel evaluation; otherwise
      // fall back to the serial addition-assignment kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, AddAssign() );
      }
      else {
         addAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment of a vector to
// a dense vector. Due to the explicit application of the SFINAE principle, this function can
// only be selected by the compiler in case both operands are SMP-assignable and the element
// types of both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable (SFINAE selected this
   // overload), so forward directly to the serial subtraction-assignment kernel.
   subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment to a dense vector. Due
// to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Parallel execution requires element types that are themselves serial.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Spawn an OpenMP team only when no serial section is active and the
      // right-hand side expression supports parallel evaluation; otherwise
      // fall back to the serial subtraction-assignment kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, SubAssign() );
      }
      else {
         subAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// vector. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable (SFINAE selected this
   // overload), so forward directly to the serial multiplication-assignment kernel.
   multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be multiplied.
// \return void
//
// This function implements the OpenMP-based SMP multiplication assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both
// operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Parallel execution requires element types that are themselves serial.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Spawn an OpenMP team only when no serial section is active and the
      // right-hand side expression supports parallel evaluation; otherwise
      // fall back to the serial multiplication-assignment kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, MultAssign() );
      }
      else {
         multAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// DIVISION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector divisor.
// \return void
//
// This function implements the default OpenMP-based SMP division assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both
// operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable (SFINAE selected this
   // overload), so forward directly to the serial division-assignment kernel.
   divAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector divisor.
// \return void
//
// This function implements the OpenMP-based SMP division assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Parallel execution requires element types that are themselves serial.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Spawn an OpenMP team only when no serial section is active and the
      // right-hand side expression supports parallel evaluation; otherwise
      // fall back to the serial division-assignment kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, DivAssign() );
      }
      else {
         divAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINTS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
// File: CG.h
/**
* This file contains (modified) code from the Eigen library.
* Eigen License:
*
* Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
* Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>
*
* This Source Code Form is subject to the terms of the Mozilla
* Public License v. 2.0. If a copy of the MPL was not distributed
* with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*
* ======================
*
* The modifications are part of the Eigen Recursive Matrix Extension (ERME).
* ERME License:
*
* Copyright (c) 2019 Darius Rückert
* Licensed under the MIT License.
*/
#pragma once
#include "../Core.h"
#include "../Core/ParallelHelper.h"
#include "Cholesky.h"
namespace Eigen::Recursive
{
// Jacobi (diagonal) preconditioner for recursive/block matrices.
//
// Stores, per row, the inverse of the matrix's diagonal element; for block
// ("recursive") scalars the inverse is computed via inverseCholesky().
// solve() then applies the stored inverses element-wise. Mirrors Eigen's
// DiagonalPreconditioner interface, but works on MatrixScalar block types.
template <typename _Scalar>
class RecursiveDiagonalPreconditioner
{
    typedef _Scalar Scalar;
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

   public:
    typedef typename Vector::StorageIndex StorageIndex;
    enum
    {
        ColsAtCompileTime = Eigen::Dynamic,
        MaxColsAtCompileTime = Eigen::Dynamic
    };

    RecursiveDiagonalPreconditioner() : m_isInitialized(false) {}

    // Pre-size the inverse-diagonal vector. The DiagonalMatrix overload of
    // factorize() below requires the size to already match.
    void resize(int N) { m_invdiag.resize(N); }

    // Construct and immediately factorize `mat`.
    template <typename MatType>
    explicit RecursiveDiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols())
    {
        compute(mat);
    }

    Eigen::Index rows() const { return m_invdiag.size(); }
    Eigen::Index cols() const { return m_invdiag.size(); }

    // Nothing to analyze for a purely diagonal preconditioner.
    template <typename MatType>
    RecursiveDiagonalPreconditioner& analyzePattern(const MatType&)
    {
        return *this;
    }

    // Sparse Matrix Initialization
    // NOTE(review): the template parameter `Scalar` shadows the class-level
    // typedef of the same name; both denote the element type in practice.
    template <typename Scalar, int options>
    // RecursiveDiagonalPreconditioner& factorize(const MatType& mat)
    RecursiveDiagonalPreconditioner& factorize(const SparseMatrix<Scalar, options>& mat)
    {
        using MatType = SparseMatrix<Scalar, options>;
        m_invdiag.resize(mat.cols());
        for (int j = 0; j < mat.outerSize(); ++j)
        {
            // Walk outer index j looking for its diagonal entry.
            typename MatType::InnerIterator it(mat, j);
            while (it && it.index() != j) ++it;
            if (it && it.index() == j)
                // m_invdiag(j) = Scalar(1)/it.value();
                removeMatrixScalar(m_invdiag(j)) = removeMatrixScalar(inverseCholesky(it.value()));
            else
                // Diagonal entry is structurally missing: fall back to the
                // multiplicative identity (no preconditioning for this row).
                // m_invdiag(j) = Scalar(1);
                removeMatrixScalar(m_invdiag(j)) = removeMatrixScalar(MultiplicativeNeutral<Scalar>::get());
        }
        m_isInitialized = true;
        return *this;
    }

    // Dense Matrix Initialization: invert each diagonal block directly.
    template <typename MatType>
    RecursiveDiagonalPreconditioner& factorize(const MatType& mat)
    {
        m_invdiag.resize(mat.cols());
        for (int j = 0; j < mat.outerSize(); ++j)
        {
            removeMatrixScalar(m_invdiag(j)) = removeMatrixScalar(inverseCholesky(mat(j, j)));
        }
        m_isInitialized = true;
        return *this;
    }

    // Diagonal-matrix initialization. The loop uses an orphaned `omp for`,
    // so it parallelizes when invoked from inside an enclosing parallel
    // region and runs sequentially otherwise.
    template <typename T>
    RecursiveDiagonalPreconditioner& factorize(const Eigen::DiagonalMatrix<T, -1>& mat)
    {
        auto N = mat.rows();
        // NOTE(review): a size mismatch aborts the program; resize() must be
        // called beforehand. The resize() after std::terminate() is dead code.
        if (m_invdiag.rows() != N)
        {
            std::terminate();
            m_invdiag.resize(N);
        }
#pragma omp for
        for (int j = 0; j < N; ++j)
        {
            m_invdiag(j) = inverseCholesky(mat.diagonal()(j));
        }
        m_isInitialized = true;
        return *this;
    }

    template <typename MatType>
    RecursiveDiagonalPreconditioner& compute(const MatType& mat)
    {
        return factorize(mat);
    }

    /** \internal Apply the preconditioner: x = D^{-1} * b, element-wise.
     * Orphaned `omp for`: parallel inside a parallel region, serial otherwise. */
    template <typename Rhs, typename Dest>
    void _solve_impl(const Rhs& b, Dest& x) const
    {
        // x = m_invdiag.array() * b.array();
#pragma omp for
        for (int i = 0; i < b.rows(); ++i)
        {
            x(i) = m_invdiag(i) * b(i);
        }
    }

    // Expression-style solve, mirroring Eigen's preconditioner interface.
    template <typename Rhs>
    inline const Eigen::Solve<RecursiveDiagonalPreconditioner, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const
    {
        eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized.");
        eigen_assert(m_invdiag.size() == b.rows() &&
                     "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
        return Eigen::Solve<RecursiveDiagonalPreconditioner, Rhs>(*this, b.derived());
    }

    Eigen::ComputationInfo info() { return Eigen::Success; }

    const auto& getDiagElement(int i) const { return m_invdiag(i); }

    // Inverse diagonal; public so callers can inspect/reuse it directly.
    Vector m_invdiag;

   protected:
    bool m_isInitialized;
};
//#define RM_CG_DEBUG_OUTPUT
/**
* A conjugate gradient solver, which works for recursives matrices.
* Solve:
* A * x = b for x
*
* The matrix A is given as function (for example a lambda function).
* This way we can implement an implicit cg solver, which does not construct the full matrix A.
*
* Example call:
*
* // Build preconditioner
* RecursiveDiagonalPreconditioner<MatrixScalar<Block>> P;
* Eigen::Index iters = 50;
* Scalar tol = 1e-50;
* P.compute(S);
*
* // Solve with explicit matrix S
* DAType tmp(n);
* recursive_conjugate_gradient(
* [&](const DAType& v) {
* tmp = S * v;
* return tmp;
* },
* ej, da, P, iters, tol);
*
*/
// Preconditioned conjugate gradient for recursive/block matrices (serial).
//
// Solves A*x = b where the action of A is supplied by `applyA`.
// NOTE(review): despite the usage example above (which shows a functor that
// RETURNS the product), this implementation invokes the operator as
// applyA(v, out), i.e. with an explicit output vector -- confirm the intended
// functor signature with callers.
//
// In/out contract:
//   x         : initial guess on entry, solution on exit.
//   iters     : maximum iterations on entry, iterations performed on exit.
//   tol_error : relative tolerance on entry, achieved relative error on exit.
template <typename MultFunction, typename Rhs, typename Dest, typename Preconditioner, typename SuperScalar>
EIGEN_DONT_INLINE void recursive_conjugate_gradient(const MultFunction& applyA, const Rhs& rhs, Dest& x,
                                                    const Preconditioner& precond, Eigen::Index& iters,
                                                    SuperScalar& tol_error)
{
    // Typedefs
    using namespace Eigen;
    using std::abs;
    using std::sqrt;
    typedef SuperScalar RealScalar;
    typedef SuperScalar Scalar;
    typedef Rhs VectorType;

    // Temp Vector variables
    Index n = rhs.rows();

#ifdef RM_CG_DEBUG_OUTPUT
    std::cout << "Starting recursive CG" << std::endl;
    std::cout << "Iterations: " << iters << std::endl;
    std::cout << "Tolerance: " << tol_error << std::endl;
    std::cout << "N: " << n << std::endl;
#endif

#if 0
    // Create them locally
    VectorType z(n);
    VectorType p(n);
#else
    // Use static variables so a repeated call with the same size doesn't allocate memory.
    // NOTE(review): thread_local makes concurrent calls on different threads safe,
    // but the buffers are reused across calls on the same thread -- the function
    // is not re-entrant for the same template instantiation.
    static thread_local VectorType z;
    static thread_local VectorType p;
    static thread_local VectorType residual;
    z.resize(n);
    p.resize(n);
    residual.resize(n);
#endif

    RealScalar tol = tol_error;
    Index maxIters = iters;

    // residual = rhs - A * x
    applyA(x, residual);
    residual = rhs - residual;

    // Trivial right-hand side: the exact solution is x = 0.
    RealScalar rhsNorm2 = squaredNorm(rhs);
    if (rhsNorm2 == 0)
    {
        x.setZero();
        iters = 0;
        tol_error = 0;
        return;
    }

    // Convergence is tested on |r|^2 relative to |b|^2.
    RealScalar threshold = tol * tol * rhsNorm2;
    RealScalar residualNorm2 = squaredNorm(residual);
#ifdef RM_CG_DEBUG_OUTPUT
    std::cout << "Initial residual: " << residualNorm2 << std::endl;
#endif
    if (residualNorm2 < threshold)
    {
        iters = 0;
        tol_error = sqrt(residualNorm2 / rhsNorm2);
        return;
    }

    p = precond.solve(residual); // initial search direction

    // the square of the absolute value of r scaled by invM
    RealScalar absNew = dot(residual, p);
#ifdef RM_CG_DEBUG_OUTPUT
    std::cout << "dot(r,p): " << absNew << std::endl;
#endif
    Index i = 0;
    while (i < maxIters)
    {
        // std::cout << "CG Residual " << i << ": " << residualNorm2 << std::endl;
        // z = A * p
        applyA(p, z);

        // the amount we travel on dir
        Scalar alpha = absNew / dot(p, z);

        // update solution
        x += scalarMult(p, alpha);
        // update residual
        residual -= scalarMult(z, alpha);

        residualNorm2 = squaredNorm(residual);
#ifdef RM_CG_DEBUG_OUTPUT
        std::cout << "Iteration: " << i << " Residual: " << residualNorm2 << " Alpha: " << alpha << std::endl;
#endif
        if (residualNorm2 < threshold) break;

        z = precond.solve(residual); // approximately solve for "A z = residual"

        // std::cout << expand(p).transpose() << std::endl;
        RealScalar absOld = absNew;
        absNew = dot(residual, z); // update the absolute value of r
        RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
        // std::cout << "absnew " << absNew << " beta " << beta << std::endl;
        p = z + scalarMult(p, beta); // update search direction
        i++;
    }
    tol_error = sqrt(residualNorm2 / rhsNorm2);
    iters = i;
}
// Wraps a value in a 64-byte-aligned struct so that per-thread slots stored
// in a contiguous vector land on separate cache lines (avoids false sharing
// between threads accumulating partial results).
template <typename T>
struct alignas(64) CacheAlignedValues
{
    T data;
};
// Reduce a container of (cache-aligned) per-thread partial results into a
// single double by summing the .data members.
// Fix: the original loop variable was also named `v`, shadowing the
// parameter it iterates over; renamed for clarity (behavior unchanged).
template <typename T>
inline double accumulate(const T& v)
{
    double sum = 0;
    for (auto& slot : v)
    {
        sum += slot.data;
    }
    return sum;
}
// Multi threaded implementation
//
// Preconditioned CG that must be called from INSIDE an enclosing
// `#pragma omp parallel` region by ALL threads of the team: it relies on
// orphaned `omp single` / `omp for` worksharing constructs (and their implied
// barriers) for synchronization. The shared work buffers are function-local
// statics, so only one team may execute a given instantiation at a time.
//
// In/out contract (written redundantly but identically by every thread):
//   x         : initial guess on entry, solution on exit.
//   iters     : maximum iterations on entry, iterations performed on exit.
//   tol_error : relative tolerance on entry, achieved relative error on exit.
template <typename MultFunction, typename Rhs, typename Dest, typename Preconditioner, typename SuperScalar>
EIGEN_DONT_INLINE void recursive_conjugate_gradient_OMP(const MultFunction& applyA, const Rhs& rhs, Dest& x,
                                                        const Preconditioner& precond, Eigen::Index& iters,
                                                        SuperScalar& tol_error)
{
    // Typedefs
    using namespace Eigen;
    using std::abs;
    using std::sqrt;
    typedef SuperScalar RealScalar;
    typedef SuperScalar Scalar;
    typedef Rhs VectorType;

    // Temp Vector variables
    Index n = rhs.rows();

    // Use static variables so a repeated call with the same size doesn't allocate memory.
    // Shared by the whole team; tmpResults1/tmpResults hold one cache-line
    // aligned partial-sum slot per thread (CacheAlignedValues avoids false
    // sharing during the reductions below).
    static VectorType z;
    static VectorType p;
    static VectorType residual;
    static std::vector<CacheAlignedValues<Scalar>> tmpResults1, tmpResults;

    // One thread sizes the shared buffers; the implicit barrier at the end of
    // `single` makes them visible to the whole team.
#pragma omp single
    {
        z.resize(n);
        p.resize(n);
        residual.resize(n);
        tmpResults1.resize(omp_get_num_threads());
        tmpResults.resize(omp_get_num_threads());
    }
    int tid = omp_get_thread_num();

    RealScalar tol = tol_error;
    Index maxIters = iters;

    // residual = rhs - A * x (the subtraction is shared across the team).
    applyA(x, residual);
#pragma omp for
    for (int i = 0; i < n; ++i)
    {
        residual(i) = rhs(i) - residual(i);
    }

    // Each thread writes its partial |rhs|^2 into its own slot, then every
    // thread reduces all slots (redundantly, but to the same value).
    // tmpResults[tid] = squaredNorm_omp(rhs);
    squaredNorm_omp_local(rhs, tmpResults[tid].data);
    RealScalar rhsNorm2 = accumulate(tmpResults);

    // Trivial right-hand side. A plain `return` here would desynchronize the
    // team's worksharing constructs, so instead the main loop is disabled via
    // maxIters = 0 and all threads fall through together.
    if (rhsNorm2 == 0)
    {
        // x.setZero();
#pragma omp for
        for (int i = 0; i < n; ++i)
        {
            x(i).get().setZero();
        }
        iters = 0;
        tol_error = 0;
        maxIters = 0;
    }
    RealScalar threshold = tol * tol * rhsNorm2;

    squaredNorm_omp_local(residual, tmpResults1[tid].data);
    RealScalar residualNorm2 = accumulate(tmpResults1);
    // RealScalar residualNorm2 = squaredNorm(residual);

    // Already converged: same no-return idiom as above.
    if (residualNorm2 < threshold)
    {
        iters = 0;
        tol_error = sqrt(residualNorm2 / rhsNorm2);
        maxIters = 0;
    }

    // NOTE(review): executed by every thread; presumably either the
    // preconditioner's solve path contains its own `omp for`, or all threads
    // write identical values into the shared p -- confirm.
    p = precond.solve(residual); // initial search direction

    dot_omp_local(residual, p, tmpResults[tid].data);
    RealScalar absNew = accumulate(tmpResults);

    Index i = 0;
    while (i < maxIters)
    {
        // std::cout << "CG Residual " << i << ": " << residualNorm2 << std::endl;
        // z = A * p
        applyA(p, z);

        dot_omp_local(p, z, tmpResults1[tid].data);
        Scalar dotpz = accumulate(tmpResults1);
        Scalar alpha = absNew / dotpz;

#pragma omp for
        for (int i = 0; i < n; ++i)
        {
            // the amount we travel on dir
            // update solution
            x(i) += p(i) * alpha;
            // update residual
            residual(i) -= z(i) * alpha;
        }

        squaredNorm_omp_local(residual, tmpResults[tid].data);
        residualNorm2 = accumulate(tmpResults);
        if (residualNorm2 < threshold) break;

        z = precond.solve(residual); // approximately solve for "A z = residual"

        RealScalar absOld = absNew;
        dot_omp_local(residual, z, tmpResults[tid].data);
        absNew = accumulate(tmpResults);
        RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
        // std::cout << "absnew " << absNew << " beta " << beta << std::endl;
#pragma omp for
        for (int i = 0; i < n; ++i)
        {
            p(i) = z(i) + p(i) * beta; // update search direction
        }
        i++;
    }
    tol_error = sqrt(residualNorm2 / rhsNorm2);
    iters = i;
} // recursive_conjugate_gradient_OMP
} // namespace Eigen::Recursive
|
modifier_view.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2016, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: David Weese <david.weese@fu-berlin.de>
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// ==========================================================================
// TODO(holtgrew): Split into modified_string_mod_view.h and modified_iterator_mod_view.h.
// TODO(holtgrew): Move out convert()
#ifndef SEQAN_MODIFIER_MODIFIER_VIEW_H_
#define SEQAN_MODIFIER_MODIFIER_VIEW_H_
namespace seqan
{
// ==========================================================================
// Forwards
// ==========================================================================
// ==========================================================================
// Classes
// ==========================================================================
// --------------------------------------------------------------------------
// Class ModView
// --------------------------------------------------------------------------
/*!
* @class ModViewModifiedIterator
* @extends ModifiedIterator
* @headerfile <seqan/modifier.h>
*
* @brief Transforms the character of a host using a custom functor.
*
* @signature template <typename THost, typename TFunctor>
* class ModifiedIterator<THost, ModView<TFunctor> >;
*
* @tparam THost The host iterator.
* @tparam TFunctor A unary functor type.
*/
/*!
* @class ModViewModifiedString
* @extends ModifiedString
* @headerfile <seqan/modifier.h>
*
* @brief Transforms the character of a host using a custom functor.
*
* @signature template <typename THost, typename TFunctor>
* class ModifiedString<THost, ModView<TFunctor> >;
*
* @tparam THost The host iterator.
* @tparam TFunctor A unary functor type.
*/
// Tag type selecting the "view through a functor" modifier: TFunctor is
// applied to every host character on access.
template <typename TFunctor>
struct ModView {};
// Cargo attached to ModView-modified iterators/strings: holds the functor
// instance that is applied on element access.
template <typename TFunctor>
struct ModViewCargo
{
    // The unary functor applied to each host value.
    TFunctor func;

    ModViewCargo() : func()
    {}
};
// Iterator adaptor that applies TFunctor to each value obtained from the
// host iterator. The cargo stores the functor; tmp_value is a scratch slot
// for materializing computed values.
template <typename THost, typename TFunctor>
class ModifiedIterator<THost, ModView<TFunctor> >
{
public:
    typedef typename Cargo<ModifiedIterator>::Type TCargo_;

    THost _host;     // wrapped host iterator
    TCargo_ _cargo;  // holds the functor
    // Scratch value; mutable so const iterators can materialize results.
    mutable typename Value<ModifiedIterator>::Type tmp_value;

    ModifiedIterator() : _host(), tmp_value()
    {}

    // Conversion from a ModView iterator over a compatible host type.
    template <typename TOtherHost>
    ModifiedIterator(ModifiedIterator<TOtherHost, ModView<TFunctor> > & origin) :
        _host(origin._host), _cargo(origin._cargo), tmp_value()
    {}

    explicit
    ModifiedIterator(THost const & host) :
        _host(host), tmp_value()
    {}

    // Construct from host iterator and functor (functor copied into cargo).
    ModifiedIterator(THost const & host, TFunctor const & functor):
        _host(host), tmp_value()
    {
        cargo(*this).func = functor;
    }
};
// --------------------------------------------------------------------------
// Class ModifiedString
// --------------------------------------------------------------------------
// ModifiedString specialization for ModView: presents the host sequence with
// TFunctor applied to every character on access. The host is stored through
// THostPointer_; the functor lives in the cargo.
template <typename THost, typename TFunctor>
class ModifiedString<THost, ModView<TFunctor> >
{
public:
    typedef typename Pointer_<THost>::Type THostPointer_;
    typedef typename Cargo<ModifiedString>::Type TCargo_;

    mutable THostPointer_ _host;  // pointer/handle to the underlying sequence
    TCargo_ _cargo;               // holds the functor
    // Scratch slot for values materialized on access (mutable for const use).
    mutable typename Value<ModifiedString>::Type tmp_value;

    // Default constructor.
    ModifiedString() : _host(), tmp_value()
    {}

    // Construct with the actual host.
    explicit
    ModifiedString(typename Parameter_<THost>::Type host):
        _host(_toPointer(host)), tmp_value()
    {}

    // Construct with the functor.
    explicit
    ModifiedString(TFunctor const & functor):
        _host(), tmp_value()
    {
        cargo(*this).func = functor;
    }

    // Constructor for creating a ModifiedString with const host from a non-const host.
    // (`dummy` is introduced by the SEQAN_CTOR_ENABLE_IF macro.)
    template <typename THost_>
    explicit
    ModifiedString(THost_ & host,
                   SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) :
        _host(_toPointer(host)), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
    }

    // Construct with the actual host; variant with functor.
    ModifiedString(typename Parameter_<THost>::Type host, TFunctor const & functor) :
        _host(_toPointer(host)), tmp_value()
    {
        cargo(*this).func = functor;
    }

    // Constructor for creating a ModifiedString with const host with a non-const host; variant with functor.
    template <typename THost_>
    explicit
    ModifiedString(THost_ & host,
                   TFunctor const & functor,
                   SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) :
        _host(_toPointer(host)), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
        cargo(*this).func = functor;
    }

    // Constructor for innermost type; hand down to _host which is a ModifiedString itself.
    template <typename THost_>
    explicit
    ModifiedString(THost_ && host,
                   SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<
                                        typename RemoveReference<THost>::Type,
                                        typename RemoveReference<THost_>::Type >)) :
        _host(std::forward<THost_>(host)), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
    }

    // Constructor for innermost type; hand down to _host which is a ModifiedString itself. Variant with functor.
    template <typename THost_>
    explicit
    ModifiedString(THost_ && host,
                   TFunctor const & functor,
                   SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<
                                        typename RemoveReference<THost>::Type,
                                        typename RemoveReference<THost_>::Type >)) :
        _host(std::forward<THost_>(host)), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
        cargo(*this).func = functor;
    }

    // Element access: delegates to value(), which applies the functor and
    // returns by copy (ModView never hands out references).
    template <typename TPos>
    inline typename Reference<ModifiedString>::Type
    operator[](TPos pos)
    {
        return value(*this, pos);
    }

    template <typename TPos>
    inline typename Reference<ModifiedString const>::Type
    operator[](TPos pos) const
    {
        return value(*this, pos);
    }
};
// ==========================================================================
// Metafunctions
// ==========================================================================
// --------------------------------------------------------------------------
// Metafunction Cargo [ModifiedIterator]
// --------------------------------------------------------------------------
// A ModView iterator's cargo is the functor holder defined above.
template <typename THost, typename TFunctor>
struct Cargo<ModifiedIterator<THost, ModView<TFunctor> > >
{
    typedef ModViewCargo<TFunctor> Type;
};
// --------------------------------------------------------------------------
// Metafunction Value [ModifiedIterator]
// --------------------------------------------------------------------------
// Value type of a ModView iterator: the functor's result type with any
// const stripped (elements are produced on the fly, never referenced).
template <typename THost, typename TFunctor>
struct Value<ModifiedIterator<THost, ModView<TFunctor> > >
{
    typedef typename TFunctor::result_type TResult_;
    typedef typename RemoveConst_<TResult_>::Type Type;
};

template <typename THost, typename TFunctor>
struct Value<ModifiedIterator<THost, ModView<TFunctor> > const> :
    Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// --------------------------------------------------------------------------
// Metafunction GetValue [ModifiedIterator]
// --------------------------------------------------------------------------
// GetValue equals Value: ModView always returns elements by copy, since
// values are computed through the functor on access.
template <typename THost, typename TFunctor>
struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > > :
    Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};

template <typename THost, typename TFunctor>
struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > const> :
    Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// --------------------------------------------------------------------------
// Metafunction Reference [ModifiedIterator]
// --------------------------------------------------------------------------
// Reference also equals Value (a copy, not a reference) -- see the NOTE
// below for the dangling-reference rationale behind this choice.
template <typename THost, typename TFunctor>
struct Reference<ModifiedIterator<THost, ModView<TFunctor> > > :
    Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};

template <typename THost, typename TFunctor>
struct Reference<ModifiedIterator<THost, ModView<TFunctor> > const> :
    Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// NOTE(h-2): ModView element access is always by copy never by reference
// This is a workaround for dangling references to the stack when
// combining infixes and modified views, more precisely:
// if you iterate over an infix of a modview then value() on the iterator
// will return reference to the tmp_value inside the moditerator
// which might have been destructed.
// This is a more general problem that stems from the fact that
// "virtual strings" of the same type (infixes, modstrings) can be
// automatically compacted into one layer, but combinations cannot.
// This workaround happens in ModView, because it is used less frequently
// then Infixes.
// --------------------------------------------------------------------------
// Metafunction Cargo [ModifiedString]
// --------------------------------------------------------------------------
// A ModView string's cargo is the same functor holder used by the iterator.
template <typename THost, typename TFunctor>
struct Cargo< ModifiedString<THost, ModView<TFunctor> > >
{
    typedef ModViewCargo<TFunctor> Type;
};
// ==========================================================================
// Functions
// ==========================================================================
// --------------------------------------------------------------------------
// Function getValue() [ModifiedIterator]
// --------------------------------------------------------------------------
// Return the transformed value at the iterator's position: applies the
// stored functor to the host iterator's current value (returned by copy).
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    return cargo(me).func(getValue(host(me)));
}

// Const overload; identical behavior.
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    return cargo(me).func(getValue(host(me)));
}
// --------------------------------------------------------------------------
// Function value() [ModifiedIterator]
// --------------------------------------------------------------------------
// value() deliberately forwards to getValue(): ModView elements are always
// returned by copy, never by reference (see the copy-semantics NOTE above).
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
value(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    return getValue(me);
}

// Const overload; identical behavior.
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
value(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    return getValue(me);
}
// --------------------------------------------------------------------------
// Function getValue() [ModifiedString]
// --------------------------------------------------------------------------
// Return the transformed character at position `pos` of the host sequence
// (functor applied on the fly, returned by copy).
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
getValue(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    return cargo(me).func(getValue(host(me), pos));
}

// Const overload; identical behavior.
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    return cargo(me).func(getValue(host(me), pos));
}
// --------------------------------------------------------------------------
// Function value() [ModifiedString]
// --------------------------------------------------------------------------
// value() forwards to getValue(): ModView strings hand out copies, never
// references (see the copy-semantics NOTE above).
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
value(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    return getValue(me, pos);
}

// Const overload; identical behavior.
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
value(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    return getValue(me, pos);
}
// --------------------------------------------------------------------------
// Function assignModViewFunctor()
// --------------------------------------------------------------------------
// Replace the functor stored in the modified string's cargo.
template <typename THost, typename TFunctor>
inline void
assignModViewFunctor(ModifiedString<THost, ModView<TFunctor> > & me, TFunctor const & functor)
{
    cargo(me).func = functor;
}
// --------------------------------------------------------------------------
// Function convert()
// --------------------------------------------------------------------------
// Apply functor F to every element of `sequence` in place.
// With OpenMP and SEQAN_PARALLEL enabled, the loop is parallelized once the
// sequence exceeds 1,000,000 elements; otherwise a plain iterator loop runs.
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type TPos;
    typedef typename MakeSigned_<TPos>::Type TSignedPos;

#pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence, Standard>::Type TIter;

    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}
// In-place convert() overload for const-qualified sequence types.
// NOTE(review): this writes through a const reference; presumably intended
// for "virtual" sequence types (e.g. modified strings) whose const
// operator[] / iterators still yield assignable proxies -- confirm.
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence const & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type TPos;
    typedef typename MakeSigned_<TPos>::Type TSignedPos;

#pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence const, Standard>::Type TIter;

    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}
} // namespace seqan
#endif // SEQAN_MODIFIER_MODIFIER_VIEW_H_
|
2852.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the PolyBench reference pattern: A[i][j] = (i + j) / nj. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  for (row = 0; row < ni; row++) {
    for (col = 0; col < nj; col++) {
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr so live-out data defeats dead-code elimination; also
 * usable to diff outputs for correctness checking. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      /* Line break every 20 elements keeps the dump readable. */
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 3x3 2D convolution of A into B with fixed stencil weights.
 * Border rows/columns of B are left untouched (the stencil needs all eight
 * neighbours), matching the original PolyBench kernel.
 * Fix: the thread count was hard-coded to num_threads(28) for one specific
 * machine; letting the OpenMP runtime choose (OMP_NUM_THREADS / core count)
 * keeps the kernel portable without changing its results. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
#pragma omp parallel for simd private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
/* Entry point: allocate A and B, initialize A, time the convolution kernel,
 * and route B through polybench_prevent_dce so the computation survives
 * optimization. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
|
3D.c | #include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <string.h>
#define STR_SIZE (256)
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016; float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
/* Print an error message and terminate with failure status.
 * Fix: the original only printed and returned, but every caller
 * (readinput/writeoutput) continues straight into code that dereferences a
 * NULL FILE* or uses bogus data after calling fatal() -- undefined behavior.
 * fatal() must not return. */
void fatal(char *s)
{
	fprintf(stderr, "Error: %s\n", s);
	exit(1);
}
/* Read grid_rows * grid_cols * layers float values (one per text line) from
 * 'file' into vect, using the layout
 * vect[i*grid_cols + j + k*grid_rows*grid_cols].
 * Error paths delegate to fatal(); the code below is only safe if fatal()
 * terminates the program (fp and val are used unconditionally afterwards). */
void readinput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {
	int i,j,k;
	FILE *fp;
	char str[STR_SIZE];
	float val;

	if( (fp = fopen(file, "r" )) ==0 )
		fatal( "The file was not opened" );

	for (i=0; i <= grid_rows-1; i++)
		for (j=0; j <= grid_cols-1; j++)
			for (k=0; k <= layers-1; k++)
			{
				if (fgets(str, STR_SIZE, fp) == NULL) fatal("Error reading file\n");
				if (feof(fp))
					fatal("not enough lines in file");
				if ((sscanf(str, "%f", &val) != 1))
					fatal("invalid file format");
				vect[i*grid_cols+j+k*grid_rows*grid_cols] = val;
			}

	fclose(fp);
}
/* Write the grid to 'file', one "<index>\t<value>" line per cell, iterating
 * rows, then cols, then layers (index increments in that order).
 * Fixes: (1) the original printed a warning on fopen failure but then wrote
 * through the NULL FILE* anyway -- now it returns early; (2) the
 * sprintf-into-fixed-buffer + fputs pair is replaced by a direct fprintf,
 * which produces identical bytes without the intermediate buffer. */
void writeoutput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {
	int i,j,k, index=0;
	FILE *fp;

	if( (fp = fopen(file, "w" )) == 0 )
	{
		printf( "The file was not opened\n" );
		return;
	}

	for (i=0; i < grid_rows; i++)
		for (j=0; j < grid_cols; j++)
			for (k=0; k < layers; k++)
			{
				fprintf(fp, "%d\t%g\n", index, vect[i*grid_cols+j+k*grid_rows*grid_cols]);
				index++;
			}

	fclose(fp);
}
/* Serial reference solver: explicit finite-difference update of the 3D heat
 * grid for numiter time steps.
 * cc is the center weight; ce/cw/cn/cs/ct/cb are the east/west/north/south/
 * top/bottom neighbour weights derived from resistances Rx/Ry/Rz and Cap.
 *
 * NOTE(review): tIn/tOut are ping-pong swapped through LOCAL pointers after
 * each step. After an odd numiter the final temperatures are in the caller's
 * tOut buffer; after an even numiter they are in tIn -- confirm callers
 * account for the parity. */
void computeTempCPU(float *pIn, float* tIn, float *tOut,
                    int nx, int ny, int nz, float Cap,
                    float Rx, float Ry, float Rz,
                    float dt, int numiter)
{	float ce, cw, cn, cs, ct, cb, cc;
	float stepDivCap = dt / Cap;
	ce = cw =stepDivCap/ Rx;
	cn = cs =stepDivCap/ Ry;
	ct = cb =stepDivCap/ Rz;

	cc = 1.0 - (2.0*ce + 2.0*cn + 3.0*ct);

	int c,w,e,n,s,b,t;
	int x,y,z;
	int i = 0;
	do{
		for(z = 0; z < nz; z++)
			for(y = 0; y < ny; y++)
				for(x = 0; x < nx; x++)
				{
					c = x + y * nx + z * nx * ny;
					/* Clamp neighbour indices at the grid faces:
					 * boundary cells reuse their own value. */
					w = (x == 0) ? c : c - 1;
					e = (x == nx - 1) ? c : c + 1;
					n = (y == 0) ? c : c - nx;
					s = (y == ny - 1) ? c : c + nx;
					b = (z == 0) ? c : c - nx * ny;
					t = (z == nz - 1) ? c : c + nx * ny;
					tOut[c] = tIn[c]*cc + tIn[n]*cn + tIn[s]*cs + tIn[e]*ce + tIn[w]*cw + tIn[t]*ct + tIn[b]*cb + (dt/Cap) * pIn[c] + ct*amb_temp;
				}
		/* Ping-pong: the buffer just written becomes the next input. */
		float *temp = tIn;
		tIn = tOut;
		tOut = temp;
		i++;
	}
	while(i < numiter);
}
float accuracy(float *arr1, float *arr2, int len)
{
float err = 0.0;
int i;
for(i = 0; i < len; i++)
{
err += (arr1[i]-arr2[i]) * (arr1[i]-arr2[i]);
}
return (float)sqrt(err/len);
}
/* OpenMP solver: same stencil as computeTempCPU, with each time step's
 * z-loop shared across the team via `omp for` (whose implicit barrier also
 * separates consecutive time steps).
 * tIn_t/tOut_t are per-thread copies of the buffer pointers; every thread
 * performs the same swap after each step, so they stay in agreement.
 * Requires OpenMP (omp_get_num_threads).
 * NOTE(review): same ping-pong parity as computeTempCPU -- after an odd
 * numiter the result is in the caller's tOut buffer. */
void computeTempOMP(float *pIn, float* tIn, float *tOut,
                    int nx, int ny, int nz, float Cap,
                    float Rx, float Ry, float Rz,
                    float dt, int numiter)
{
	float ce, cw, cn, cs, ct, cb, cc;

	float stepDivCap = dt / Cap;
	ce = cw =stepDivCap/ Rx;
	cn = cs =stepDivCap/ Ry;
	ct = cb =stepDivCap/ Rz;

	cc = 1.0 - (2.0*ce + 2.0*cn + 3.0*ct);

#pragma omp parallel
	{
		int count = 0;
		float *tIn_t = tIn;
		float *tOut_t = tOut;

#pragma omp master
		printf("%d threads running\n", omp_get_num_threads());

		do {
			int z;
#pragma omp for
			for (z = 0; z < nz; z++) {
				int y;
				for (y = 0; y < ny; y++) {
					int x;
					for (x = 0; x < nx; x++) {
						int c, w, e, n, s, b, t;
						c = x + y * nx + z * nx * ny;
						/* Boundary cells reuse their own value. */
						w = (x == 0) ? c : c - 1;
						e = (x == nx-1) ? c : c + 1;
						n = (y == 0) ? c : c - nx;
						s = (y == ny-1) ? c : c + nx;
						b = (z == 0) ? c : c - nx * ny;
						t = (z == nz-1) ? c : c + nx * ny;
						tOut_t[c] = cc * tIn_t[c] + cw * tIn_t[w] + ce * tIn_t[e]
						          + cs * tIn_t[s] + cn * tIn_t[n] + cb * tIn_t[b] + ct * tIn_t[t]+(dt/Cap) * pIn[c] + ct*amb_temp;
					}
				}
			}
			/* Per-thread ping-pong; the implicit barrier of the
			 * `omp for` above makes this safe. */
			float *t = tIn_t;
			tIn_t = tOut_t;
			tOut_t = t;
			count++;
		} while (count < numiter);
	}
	return;
}
/* Print command-line usage to stderr and terminate with status 1. */
void usage(int argc, char **argv)
{
	fprintf(stderr, "Usage: %s <rows/cols> <layers> <iterations> <powerFile> <tempFile> <outputFile>\n", argv[0]);
	fputs("\t<rows/cols> - number of rows/cols in the grid (positive integer)\n", stderr);
	fputs("\t<layers> - number of layers in the grid (positive integer)\n", stderr);
	fputs("\t<iteration> - number of iterations\n", stderr);
	fputs("\t<powerFile> - name of the file containing the initial power values of each cell\n", stderr);
	fputs("\t<tempFile> - name of the file containing the initial temperature values of each cell\n", stderr);
	fputs("\t<outputFile - output file\n", stderr);
	exit(1);
}
/* Entry point: parse grid geometry and file names, read the power and
 * initial-temperature grids, run the OpenMP solver (timed), run the serial
 * reference solver on a pristine copy, and report runtime, RMS accuracy and
 * the output grid.
 * Fix: the original leaked tempCopy and answer; all five buffers are now
 * freed. */
int main(int argc, char** argv)
{
	if (argc != 7)
	{
		usage(argc,argv);
	}

	char *pfile, *tfile, *ofile;
	int iterations = atoi(argv[3]);

	pfile = argv[4];
	tfile = argv[5];
	ofile = argv[6];
	int numCols = atoi(argv[1]);
	int numRows = atoi(argv[1]);
	int layers = atoi(argv[2]);

	/* Physical model parameters (Rodinia hotspot3D). */
	float dx = chip_height/numRows;
	float dy = chip_width/numCols;
	float dz = t_chip/layers;

	float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * dx * dy;
	float Rx = dy / (2.0 * K_SI * t_chip * dx);
	float Ry = dx / (2.0 * K_SI * t_chip * dy);
	float Rz = dz / (K_SI * dx * dy);

	float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
	float dt = PRECISION / max_slope;

	float *powerIn, *tempOut, *tempIn, *tempCopy;
	int size = numCols * numRows * layers;

	powerIn = (float*)calloc(size, sizeof(float));
	tempCopy = (float*)malloc(size * sizeof(float));
	tempIn = (float*)calloc(size,sizeof(float));
	tempOut = (float*)calloc(size, sizeof(float));
	float* answer = (float*)calloc(size, sizeof(float));

	readinput(powerIn,numRows, numCols, layers,pfile);
	readinput(tempIn, numRows, numCols, layers, tfile);

	/* Pristine copy of the initial temperatures so the serial reference
	 * run starts from the same state as the OpenMP run. */
	memcpy(tempCopy,tempIn, size * sizeof(float));

	struct timeval start, stop;
	float time;
	gettimeofday(&start,NULL);
	computeTempOMP(powerIn, tempIn, tempOut, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt,iterations);
	gettimeofday(&stop,NULL);
	time = (stop.tv_usec-start.tv_usec)*1.0e-6 + stop.tv_sec - start.tv_sec;

	computeTempCPU(powerIn, tempCopy, answer, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt,iterations);

	float acc = accuracy(tempOut,answer,numRows*numCols*layers);
	printf("Time: %.3f (s)\n",time);
	printf("Accuracy: %e\n",acc);
	writeoutput(tempOut,numRows, numCols, layers, ofile);

	free(tempIn);
	free(tempOut);
	free(powerIn);
	free(tempCopy);
	free(answer);
	return 0;
}
|
test_parallel_reduction.c | //===-- test_parallel_reduction.c - Test reductions at parallel ---*- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
//
// This file has been modified from the file
// openmp/runtime/test/tasking/omp_parallel_reduction.c
// of the LLVM project (https://github.com/llvm/llvm-project)
// under the Apache License v2.0 with LLVM Exceptions.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "tests.h"
#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */
/* Exercise every reduction operator allowed on an OpenMP parallel-for
 * construct (+, -, *, &&, ||, &, |, ^) and compare each parallel result
 * against a serially computed reference value.
 *
 * schedule(dynamic, 1) forces maximal interleaving so that a broken
 * reduction implementation is likely to produce a wrong combined value.
 * LOOPCOUNT and REPETITIONS come from tests.h.
 *
 * Returns 1 (true) when every reduction produced the expected value,
 * 0 otherwise; 'result' counts the individual failures.
 */
int test_omp_parallel_reduction(void) {
int sum;
int known_sum;
double dsum;
double dknown_sum;
double dt = 0.5; /* base of geometric row for + and - test*/
double rounding_error = 1.E-9;
int diff;
double ddiff;
int product;
int known_product;
int logic_and;
int logic_or;
int bit_and;
int bit_or;
int exclusiv_bit_or;
int logics[LOOPCOUNT];
int i;
double dpt;
int result;
/* Identity elements for each reduction operator. */
sum = 0;
dsum = 0;
product = 1;
logic_and = 1;
logic_or = 0;
bit_and = 1;
bit_or = 0;
exclusiv_bit_or = 0;
result = 0;
dt = 1. / 3.;
/* Gauss: 1 + 2 + ... + LOOPCOUNT. */
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
/* Tests for integers */
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(+ : sum)
for (i = 1; i <= LOOPCOUNT; i++) {
sum = sum + i;
}
if (known_sum != sum) {
result++;
fprintf(stderr, "Error in sum with integers: Result was %d instead of %d\n",
sum, known_sum);
}
/* Subtraction reduction: start at the full sum, subtract everything back. */
diff = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(- : diff)
for (i = 1; i <= LOOPCOUNT; ++i) {
diff = diff - i;
}
if (diff != 0) {
result++;
fprintf(stderr,
"Error in difference with integers: Result was %d instead of 0.\n",
diff);
}
/* Tests for doubles */
dsum = 0;
dpt = 1;
/* dpt = dt^DOUBLE_DIGITS, used in the closed-form geometric series. */
for (i = 0; i < DOUBLE_DIGITS; ++i) {
dpt *= dt;
}
/* Sum of dt^i for i = 0..DOUBLE_DIGITS-1. */
dknown_sum = (1 - dpt) / (1 - dt);
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(+ : dsum)
for (i = 0; i < DOUBLE_DIGITS; ++i) {
dsum += pow(dt, i);
}
/* Floating-point reductions may reassociate, so compare with a tolerance. */
if (fabs(dsum - dknown_sum) > rounding_error) {
result++;
fprintf(stderr,
"Error in sum with doubles: Result was %f instead of %f "
"(Difference: %E)\n",
dsum, dknown_sum, dsum - dknown_sum);
}
dpt = 1;
for (i = 0; i < DOUBLE_DIGITS; ++i) {
dpt *= dt;
}
// fprintf(stderr,"\n");
/* Double subtraction reduction: start from the series total. */
ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(- : ddiff)
for (i = 0; i < DOUBLE_DIGITS; ++i) {
ddiff -= pow(dt, i);
}
if (fabs(ddiff) > rounding_error) {
result++;
fprintf(stderr,
"Error in Difference with doubles: Result was %E instead of 0.0\n",
ddiff);
}
/* Tests for product of integers */
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(* : product)
for (i = 1; i <= MAX_FACTOR; i++) {
product *= i;
}
known_product = KNOWN_PRODUCT; /* MAX_FACTOR! = 10! */
if (known_product != product) {
result++;
fprintf(stderr,
"Error in Product with integers: Result was %d instead of %d\n\n",
product, known_product);
}
/* Tests for logical and */
/* Part 1: all ones -> AND must stay true. */
for (i = 0; i < LOOPCOUNT; i++) {
logics[i] = 1;
}
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)
for (i = 0; i < LOOPCOUNT; ++i) {
logic_and = (logic_and && logics[i]);
}
if (!logic_and) {
result++;
fprintf(stderr, "Error in logic AND part 1.\n");
}
/* Part 2: a single zero in the middle -> AND must become false. */
logic_and = 1;
logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)
for (i = 0; i < LOOPCOUNT; ++i) {
logic_and = logic_and && logics[i];
}
if (logic_and) {
result++;
fprintf(stderr, "Error in logic AND part 2.\n");
}
/* Tests for logical or */
/* Part 1: all zeros -> OR must stay false. */
for (i = 0; i < LOOPCOUNT; i++) {
logics[i] = 0;
}
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(|| \
: logic_or)
for (i = 0; i < LOOPCOUNT; ++i) {
logic_or = logic_or || logics[i];
}
if (logic_or) {
result++;
fprintf(stderr, "Error in logic OR part 1.\n");
}
/* Part 2: a single one in the middle -> OR must become true. */
logic_or = 0;
logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(|| \
: logic_or)
for (i = 0; i < LOOPCOUNT; ++i) {
logic_or = logic_or || logics[i];
}
if (!logic_or) {
result++;
fprintf(stderr, "Error in logic OR part 2.\n");
}
/* Tests for bitwise and */
/* Part 1: all bits set in every element -> result stays 1. */
for (i = 0; i < LOOPCOUNT; ++i) {
logics[i] = 1;
}
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(& : bit_and)
for (i = 0; i < LOOPCOUNT; ++i) {
bit_and = (bit_and & logics[i]);
}
if (!bit_and) {
result++;
fprintf(stderr, "Error in BIT AND part 1.\n");
}
/* Part 2: one zero element clears the result. */
bit_and = 1;
logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(& : bit_and)
for (i = 0; i < LOOPCOUNT; ++i) {
bit_and = bit_and & logics[i];
}
if (bit_and) {
result++;
fprintf(stderr, "Error in BIT AND part 2.\n");
}
for (i = 0; i < LOOPCOUNT; i++) {
logics[i] = 0;
}
/* Tests for bitwise or */
/* Part 1: all zeros -> result stays 0. */
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(| : bit_or)
for (i = 0; i < LOOPCOUNT; ++i) {
bit_or = bit_or | logics[i];
}
if (bit_or) {
result++;
fprintf(stderr, "Error in BIT OR part 1\n");
}
/* Part 2: one set element must propagate into the result. */
bit_or = 0;
logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(| : bit_or)
for (i = 0; i < LOOPCOUNT; ++i) {
bit_or = bit_or | logics[i];
}
if (!bit_or) {
result++;
fprintf(stderr, "Error in BIT OR part 2\n");
}
for (i = 0; i < LOOPCOUNT; i++) {
logics[i] = 0;
}
/* Tests for bitwise xor */
/* Part 1: XOR over all zeros -> 0. */
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)
for (i = 0; i < LOOPCOUNT; ++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
if (exclusiv_bit_or) {
result++;
fprintf(stderr, "Error in EXCLUSIVE BIT OR part 1\n");
}
/* Part 2: exactly one set element -> XOR is 1. */
exclusiv_bit_or = 0;
logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)
for (i = 0; i < LOOPCOUNT; ++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
if (!exclusiv_bit_or) {
result++;
fprintf(stderr, "Error in EXCLUSIVE BIT OR part 2\n");
}
/*printf("\nResult:%d\n",result);*/
return (result == 0);
}
/* Test driver: run the reduction test REPETITIONS times and report
 * PASSED/FAILED based on how many repetitions returned failure.
 */
int main(void) {
  int num_failed = 0;
  int rep;

  for (rep = 0; rep < REPETITIONS; rep++) {
    /* test returns non-zero on success, so count the zero results */
    num_failed += test_omp_parallel_reduction() ? 0 : 1;
  }

  if (num_failed == 0) {
    printf("***PASSED***\n");
  }
  else {
    printf("***FAILED***: %d failures\n", num_failed);
  }

  return num_failed != 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
|
lu.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU
This benchmark is an OpenMP C version of the NPB LU code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: S. Weeratunga
V. Venkatakrishnan
E. Barszcz
M. Yarrow
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "npb-C.h"
/* global variables */
#include "applu.h"
#if defined(_OPENMP)
/* for thread synchronization */
static boolean flag[ISIZ1/2*2+1];
#endif /* _OPENMP */
/* function declarations */
static void blts (int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void buts(int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void domain(void);
static void erhs(void);
static void error(void);
static void exact( int i, int j, int k, double u000ijk[5] );
static void jacld(int k);
static void jacu(int k);
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]);
static void pintgr(void);
static void read_input(void);
static void rhs(void);
static void setbv(void);
static void setcoeff(void);
static void setiv(void);
static void ssor(void);
static void verify(double xcr[5], double xce[5], double xci,
char *class, boolean *verified);
/*--------------------------------------------------------------------
program applu
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c
c driver for the performance evaluation of the solver for
c five coupled parabolic/elliptic partial differential equations.
c
c Fix vs. original: main fell off the end without a return statement;
c the exit status is now explicit.
c
--------------------------------------------------------------------*/
int main(int argc, char **argv) {
  char class;          /* problem class letter filled in by verify() */
  boolean verified;    /* set by verify() */
  double mflops;
  int nthreads = 1;    /* reported thread count; 1 when not OpenMP */

/*--------------------------------------------------------------------
c read input data
--------------------------------------------------------------------*/
  read_input();
/*--------------------------------------------------------------------
c set up domain sizes
--------------------------------------------------------------------*/
  domain();
/*--------------------------------------------------------------------
c set up coefficients
--------------------------------------------------------------------*/
  setcoeff();
/*--------------------------------------------------------------------
c set the boundary values for dependent variables
--------------------------------------------------------------------*/
  setbv();
/*--------------------------------------------------------------------
c set the initial values for dependent variables
--------------------------------------------------------------------*/
  setiv();
/*--------------------------------------------------------------------
c compute the forcing term based on prescribed exact solution
--------------------------------------------------------------------*/
  erhs();

/* Record the actual team size for the results report; the master-only
   write is safe because nthreads is read after the region ends. */
#pragma omp parallel
  {
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  }

/*--------------------------------------------------------------------
c perform the SSOR iterations
--------------------------------------------------------------------*/
  ssor();
/*--------------------------------------------------------------------
c compute the solution error
--------------------------------------------------------------------*/
  error();
/*--------------------------------------------------------------------
c compute the surface integral
--------------------------------------------------------------------*/
  pintgr();
/*--------------------------------------------------------------------
c verification test
--------------------------------------------------------------------*/
  verify ( rsdnm, errnm, frc, &class, &verified );

  /* MFLOPS formula from the NPB LU specification. */
  mflops = (double)itmax*(1984.77*(double)nx0
      *(double)ny0
      *(double)nz0
      -10923.3*pow2((double)( nx0+ny0+nz0 )/3.0)
      +27770.9* (double)( nx0+ny0+nz0 )/3.0
      -144010.0)
    / (maxtime*1000000.0);

  c_print_results("LU", class, nx0,
      ny0, nz0, itmax, nthreads,
      maxtime, mflops, " floating point", verified,
      NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6,
      "(none)");

  return 0; /* explicit success status (was missing) */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Regular-sparse block lower-triangular solve for one k-plane:
 * v <-- (L-inv) * v.  Called from inside an enclosing parallel region;
 * the two "omp for nowait" loops below share the same static partition
 * of i, and the second loop forms a software pipeline over i using the
 * file-scope flag[] array as a producer/consumer handshake.
 */
static void blts (int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block lower triangular solution:
c
c v <-- ( L-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5]; /* local copy of the 5x5 diagonal block, factored in place */
/* Stage 1: subtract the k-direction (lower) coupling; independent per i,
   so no synchronization is needed.  nowait: matching static schedules let
   each thread move straight on to its own i-band in stage 2. */
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]
+ ldz[i][j][m][1] * v[i][j][k-1][1]
+ ldz[i][j][m][2] * v[i][j][k-1][2]
+ ldz[i][j][m][3] * v[i][j][k-1][3]
+ ldz[i][j][m][4] * v[i][j][k-1][4] );
}
}
}
/* Stage 2: sweep in increasing i; row i depends on row i-1 (ldx term),
   so the rows are pipelined across threads with the flag[] handshake. */
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
#if defined(_OPENMP)
/* Spin until row i-1 has been produced (its owner sets flag[i-1] = 1). */
if (i != ist) {
while (flag[i-1] == 0) {
#pragma omp flush(flag)
;
}
}
/* Spin until the consumer of the previous wavefront has cleared flag[i],
   so this row's "done" signal is not overwritten prematurely. */
if (i != iend) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
for (j = jst; j <= jend; j++) {
/* Subtract the j-direction (ldy) and i-direction (ldx) lower coupling. */
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldy[i][j][m][0] * v[i][j-1][k][0]
+ ldx[i][j][m][0] * v[i-1][j][k][0]
+ ldy[i][j][m][1] * v[i][j-1][k][1]
+ ldx[i][j][m][1] * v[i-1][j][k][1]
+ ldy[i][j][m][2] * v[i][j-1][k][2]
+ ldx[i][j][m][2] * v[i-1][j][k][2]
+ ldy[i][j][m][3] * v[i][j-1][k][3]
+ ldx[i][j][m][3] * v[i-1][j][k][3]
+ ldy[i][j][m][4] * v[i][j-1][k][4]
+ ldx[i][j][m][4] * v[i-1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
c
c forward elimination
--------------------------------------------------------------------*/
/* Copy the 5x5 diagonal block; tmat is factored destructively below. */
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
/* Unrolled Gaussian elimination without pivoting.
   Eliminate column 0 using pivot tmat[0][0]. */
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
v[i][j][k][1] = v[i][j][k][1]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][0] * tmp;
/* Eliminate column 1 using pivot tmat[1][1]. */
tmp1 = 1.0 / tmat[ 1][1];
tmp = tmp1 * tmat[ 2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][1] * tmp;
/* Eliminate column 2 using pivot tmat[2][2]. */
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][2] * tmp;
/* Eliminate column 3 using pivot tmat[3][3]. */
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
/* Solve the upper-triangular system from component 4 down to 0. */
v[i][j][k][4] = v[i][j][k][4]
/ tmat[4][4];
v[i][j][k][3] = v[i][j][k][3]
- tmat[3][4] * v[i][j][k][4];
v[i][j][k][3] = v[i][j][k][3]
/ tmat[3][3];
v[i][j][k][2] = v[i][j][k][2]
- tmat[2][3] * v[i][j][k][3]
- tmat[2][4] * v[i][j][k][4];
v[i][j][k][2] = v[i][j][k][2]
/ tmat[2][2];
v[i][j][k][1] = v[i][j][k][1]
- tmat[1][2] * v[i][j][k][2]
- tmat[1][3] * v[i][j][k][3]
- tmat[1][4] * v[i][j][k][4];
v[i][j][k][1] = v[i][j][k][1]
/ tmat[1][1];
v[i][j][k][0] = v[i][j][k][0]
- tmat[0][1] * v[i][j][k][1]
- tmat[0][2] * v[i][j][k][2]
- tmat[0][3] * v[i][j][k][3]
- tmat[0][4] * v[i][j][k][4];
v[i][j][k][0] = v[i][j][k][0]
/ tmat[0][0];
}
#if defined(_OPENMP)
/* Consume row i-1's signal, then publish that row i is ready. */
if (i != ist) flag[i-1] = 0;
if (i != iend) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Regular-sparse block upper-triangular solve for one k-plane:
 * v <-- (U-inv) * v.  Mirror image of blts(): it sweeps in DECREASING i
 * and pipelines the rows across threads with the same flag[] handshake,
 * accumulating the correction in tv before applying it to v.
 */
static void buts(int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block upper triangular solution:
c
c v <-- ( U-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5]; /* local copy of the 5x5 diagonal block, factored in place */
/* Stage 1: k-direction (upper) coupling into tv; independent per i. */
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
for (m = 0; m < 5; m++) {
tv[i][j][m] =
omega * ( udz[i][j][m][0] * v[i][j][k+1][0]
+ udz[i][j][m][1] * v[i][j][k+1][1]
+ udz[i][j][m][2] * v[i][j][k+1][2]
+ udz[i][j][m][3] * v[i][j][k+1][3]
+ udz[i][j][m][4] * v[i][j][k+1][4] );
}
}
}
/* Stage 2: backward sweep over i; row i depends on row i+1 (udx term),
   pipelined with the flag[] handshake, symmetric to blts(). */
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
#if defined(_OPENMP)
/* Spin until row i+1 has been produced. */
if (i != iend) {
while (flag[i+1] == 0) {
#pragma omp flush(flag)
;
}
}
/* Spin until this row's previous "done" signal has been consumed. */
if (i != ist) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
for (j = jend; j >= jst; j--) {
/* Add the j-direction (udy) and i-direction (udx) upper coupling. */
for (m = 0; m < 5; m++) {
tv[i][j][m] = tv[i][j][m]
+ omega * ( udy[i][j][m][0] * v[i][j+1][k][0]
+ udx[i][j][m][0] * v[i+1][j][k][0]
+ udy[i][j][m][1] * v[i][j+1][k][1]
+ udx[i][j][m][1] * v[i+1][j][k][1]
+ udy[i][j][m][2] * v[i][j+1][k][2]
+ udx[i][j][m][2] * v[i+1][j][k][2]
+ udy[i][j][m][3] * v[i][j+1][k][3]
+ udx[i][j][m][3] * v[i+1][j][k][3]
+ udy[i][j][m][4] * v[i][j+1][k][4]
+ udx[i][j][m][4] * v[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
--------------------------------------------------------------------*/
/* Copy the 5x5 diagonal block; tmat is factored destructively below. */
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
/* Unrolled Gaussian elimination without pivoting.
   Eliminate column 0 using pivot tmat[0][0]. */
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
tv[i][j][1] = tv[i][j][1]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][0] * tmp;
/* Eliminate column 1 using pivot tmat[1][1]. */
tmp1 = 1.0 / tmat[1][1];
tmp = tmp1 * tmat[2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][1] * tmp;
/* Eliminate column 2 using pivot tmat[2][2]. */
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][2] * tmp;
/* Eliminate column 3 using pivot tmat[3][3]. */
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
/* Solve the upper-triangular system from component 4 down to 0. */
tv[i][j][4] = tv[i][j][4]
/ tmat[4][4];
tv[i][j][3] = tv[i][j][3]
- tmat[3][4] * tv[i][j][4];
tv[i][j][3] = tv[i][j][3]
/ tmat[3][3];
tv[i][j][2] = tv[i][j][2]
- tmat[2][3] * tv[i][j][3]
- tmat[2][4] * tv[i][j][4];
tv[i][j][2] = tv[i][j][2]
/ tmat[2][2];
tv[i][j][1] = tv[i][j][1]
- tmat[1][2] * tv[i][j][2]
- tmat[1][3] * tv[i][j][3]
- tmat[1][4] * tv[i][j][4];
tv[i][j][1] = tv[i][j][1]
/ tmat[1][1];
tv[i][j][0] = tv[i][j][0]
- tmat[0][1] * tv[i][j][1]
- tmat[0][2] * tv[i][j][2]
- tmat[0][3] * tv[i][j][3]
- tmat[0][4] * tv[i][j][4];
tv[i][j][0] = tv[i][j][0]
/ tmat[0][0];
/* Apply the computed correction to the solution vector. */
v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
}
#if defined(_OPENMP)
/* Consume row i+1's signal, then publish that row i is ready. */
if (i != iend) flag[i+1] = 0;
if (i != ist) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Copy the configured problem dimensions into the working globals,
 * validate them against the stencil's minimum size and the compiled-in
 * array bounds, and set the interior i/j loop extents.
 * Exits with status 1 on an invalid sub-domain size.
 */
static void domain(void) {
  nx = nx0;
  ny = ny0;
  nz = nz0;

  /* The difference stencils need at least 4 points per direction. */
  if (nx < 4 || ny < 4 || nz < 4) {
    printf(" SUBDOMAIN SIZE IS TOO SMALL - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n"
" TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
    exit(1);
  }

  /* The static arrays are dimensioned ISIZ1 x ISIZ2 x ISIZ3. */
  if (nx > ISIZ1 || ny > ISIZ2 || nz > ISIZ3) {
    printf(" SUBDOMAIN SIZE IS TOO LARGE - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n"
" ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n"
" CURRENTLY%4d%4d%4d\n", nx, ny, nz);
    exit(1);
  }

  /* Interior extents: exclude one boundary point on each side. */
  ist = 1;
  jst = 1;
  iend = nx - 2;
  jend = ny - 2;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void erhs(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
c
c compute the right hand side based on exact solution
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double dsspm;
double xi, eta, zeta;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
dsspm = dssp;
#pragma omp for
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
for (k = 0; k < nz; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = 0.0;
}
}
}
}
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
xi = ( (double)(iglob) ) / ( nx0 - 1 );
for (j = 0; j < ny; j++) {
jglob = j;
eta = ( (double)(jglob) ) / ( ny0 - 1 );
for (k = 0; k < nz; k++) {
zeta = ( (double)(k) ) / ( nz - 1 );
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = ce[m][0]
+ ce[m][1] * xi
+ ce[m][2] * eta
+ ce[m][3] * zeta
+ ce[m][4] * xi * xi
+ ce[m][5] * eta * eta
+ ce[m][6] * zeta * zeta
+ ce[m][7] * xi * xi * xi
+ ce[m][8] * eta * eta * eta
+ ce[m][9] * zeta * zeta * zeta
+ ce[m][10] * xi * xi * xi * xi
+ ce[m][11] * eta * eta * eta * eta
+ ce[m][12] * zeta * zeta * zeta * zeta;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
#pragma omp for
for (i = L1; i <= L2; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k < nz - 1; k++) {
flux[i][j][k][0] = rsd[i][j][k][1];
u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][2] = rsd[i][j][k][2] * u21;
flux[i][j][k][3] = rsd[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp for
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (i = ist; i <= iend; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
for (i = ist; i <= L2; i++) {
tmp = 1.0 / rsd[i][j][k][0];
u21i = tmp * rsd[i][j][k][1];
u31i = tmp * rsd[i][j][k][2];
u41i = tmp * rsd[i][j][k][3];
u51i = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i-1][j][k][0];
u21im1 = tmp * rsd[i-1][j][k][1];
u31im1 = tmp * rsd[i-1][j][k][2];
u41im1 = tmp * rsd[i-1][j][k][3];
u51im1 = tmp * rsd[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 *
( u21i - u21im1 );
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )
- ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )
+ (1.0/6.0)
* tx3 * ( u21i*u21i - u21im1*u21im1 )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
for (i = ist; i <= iend; i++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dx1 * tx1 * ( rsd[i-1][j][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i+1][j][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( rsd[i-1][j][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i+1][j][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( rsd[i-1][j][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i+1][j][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( rsd[i-1][j][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i+1][j][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( rsd[i-1][j][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[1][j][k][m] = frct[1][j][k][m]
- dsspm * ( + 5.0 * rsd[1][j][k][m]
- 4.0 * rsd[2][j][k][m]
+ rsd[3][j][k][m] );
frct[2][j][k][m] = frct[2][j][k][m]
- dsspm * ( - 4.0 * rsd[1][j][k][m]
+ 6.0 * rsd[2][j][k][m]
- 4.0 * rsd[3][j][k][m]
+ rsd[4][j][k][m] );
}
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <=iend1; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i-2][j][k][m]
- 4.0 * rsd[i-1][j][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i+1][j][k][m]
+ rsd[i+2][j][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[nx-3][j][k][m] = frct[nx-3][j][k][m]
- dsspm * ( rsd[nx-5][j][k][m]
- 4.0 * rsd[nx-4][j][k][m]
+ 6.0 * rsd[nx-3][j][k][m]
- 4.0 * rsd[nx-2][j][k][m] );
frct[nx-2][j][k][m] = frct[nx-2][j][k][m]
- dsspm * ( rsd[nx-4][j][k][m]
- 4.0 * rsd[nx-3][j][k][m]
+ 5.0 * rsd[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = L1; j <= L2; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = rsd[i][j][k][2];
u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u31;
flux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][3] = rsd[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp for
for (i = ist; i <= iend; i++) {
for (k = 1; k <= nz - 2; k++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
for (j = jst; j <= L2; j++) {
tmp = 1.0 / rsd[i][j][k][0];
u21j = tmp * rsd[i][j][k][1];
u31j = tmp * rsd[i][j][k][2];
u41j = tmp * rsd[i][j][k][3];
u51j = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j-1][k][0];
u21jm1 = tmp * rsd[i][j-1][k][1];
u31jm1 = tmp * rsd[i][j-1][k][2];
u41jm1 = tmp * rsd[i][j-1][k][3];
u51jm1 = tmp * rsd[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 *
( u31j - u31jm1 );
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )
- ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )
+ (1.0/6.0)
* ty3 * ( u31j*u31j - u31jm1*u31jm1 )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
for (j = jst; j <= jend; j++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dy1 * ty1 * ( rsd[i][j-1][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j+1][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( rsd[i][j-1][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j+1][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( rsd[i][j-1][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j+1][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( rsd[i][j-1][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j+1][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( rsd[i][j-1][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][1][k][m] = frct[i][1][k][m]
- dsspm * ( + 5.0 * rsd[i][1][k][m]
- 4.0 * rsd[i][2][k][m]
+ rsd[i][3][k][m] );
frct[i][2][k][m] = frct[i][2][k][m]
- dsspm * ( - 4.0 * rsd[i][1][k][m]
+ 6.0 * rsd[i][2][k][m]
- 4.0 * rsd[i][3][k][m]
+ rsd[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j-2][k][m]
- 4.0 * rsd[i][j-1][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j+1][k][m]
+ rsd[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][ny-3][k][m] = frct[i][ny-3][k][m]
- dsspm * ( rsd[i][ny-5][k][m]
- 4.0 * rsd[i][ny-4][k][m]
+ 6.0 * rsd[i][ny-3][k][m]
- 4.0 * rsd[i][ny-2][k][m] );
frct[i][ny-2][k][m] = frct[i][ny-2][k][m]
- dsspm * ( rsd[i][ny-4][k][m]
- 4.0 * rsd[i][ny-3][k][m]
+ 5.0 * rsd[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = rsd[i][j][k][3];
u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u41;
flux[i][j][k][2] = rsd[i][j][k][2] * u41;
flux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;
}
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / rsd[i][j][k][0];
u21k = tmp * rsd[i][j][k][1];
u31k = tmp * rsd[i][j][k][2];
u41k = tmp * rsd[i][j][k][3];
u51k = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j][k-1][0];
u21km1 = tmp * rsd[i][j][k-1][1];
u31km1 = tmp * rsd[i][j][k-1][2];
u41km1 = tmp * rsd[i][j][k-1][3];
u51km1 = tmp * rsd[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k
- u41km1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )
- ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )
+ (1.0/6.0)
* tz3 * ( u41k*u41k - u41km1*u41km1 )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
for (k = 1; k <= nz - 2; k++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dz1 * tz1 * ( rsd[i][j][k+1][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j][k-1][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( rsd[i][j][k+1][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j][k-1][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( rsd[i][j][k+1][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j][k-1][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( rsd[i][j][k+1][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j][k-1][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( rsd[i][j][k+1][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j][k-1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][j][1][m] = frct[i][j][1][m]
- dsspm * ( + 5.0 * rsd[i][j][1][m]
- 4.0 * rsd[i][j][2][m]
+ rsd[i][j][3][m] );
frct[i][j][2][m] = frct[i][j][2][m]
- dsspm * (- 4.0 * rsd[i][j][1][m]
+ 6.0 * rsd[i][j][2][m]
- 4.0 * rsd[i][j][3][m]
+ rsd[i][j][4][m] );
}
for (k = 3; k <= nz - 4; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j][k-2][m]
- 4.0 * rsd[i][j][k-1][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j][k+1][m]
+ rsd[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][j][nz-3][m] = frct[i][j][nz-3][m]
- dsspm * ( rsd[i][j][nz-5][m]
- 4.0 * rsd[i][j][nz-4][m]
+ 6.0 * rsd[i][j][nz-3][m]
- 4.0 * rsd[i][j][nz-2][m] );
frct[i][j][nz-2][m] = frct[i][j][nz-2][m]
- dsspm * ( rsd[i][j][nz-4][m]
- 4.0 * rsd[i][j][nz-3][m]
+ 5.0 * rsd[i][j][nz-2][m] );
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error(void) {
/*--------------------------------------------------------------------
c  Compute the solution error: the RMS difference, per flow variable,
c  between the computed field u and the exact analytical solution,
c  stored into the global array errnm[0..4].
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int iglob, jglob;
  double diff;
  double usol[5];

  /* reset the per-component accumulators */
  for (m = 0; m < 5; m++) {
    errnm[m] = 0.0;
  }

  /* accumulate squared pointwise differences over the interior
     sub-domain (same traversal order as the reference code, so the
     floating-point summation is bit-identical) */
  for (i = ist; i <= iend; i++) {
    iglob = i;   /* global index == local index in this version */
    for (j = jst; j <= jend; j++) {
      jglob = j;
      for (k = 1; k <= nz-2; k++) {
        exact( iglob, jglob, k, usol );
        for (m = 0; m < 5; m++) {
          diff = usol[m] - u[i][j][k][m];
          errnm[m] = errnm[m] + diff * diff;
        }
      }
    }
  }

  /* RMS over the number of interior grid points */
  for (m = 0; m < 5; m++) {
    errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact( int i, int j, int k, double u000ijk[5] ) {
/*--------------------------------------------------------------------
c  Evaluate the exact analytical solution at grid point (i,j,k):
c  for each of the five flow variables, a quartic polynomial in the
c  normalized coordinates with coefficients from the global ce[5][13].
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int m;
  double xi, eta, zeta;
  double xi2, xi3, xi4;
  double eta2, eta3, eta4;
  double zeta2, zeta3, zeta4;

  /* normalized coordinates in [0,1]; note zeta is scaled by nz while
     xi/eta use nx0/ny0, matching the reference implementation */
  xi   = ((double)i) / (nx0 - 1);
  eta  = ((double)j) / (ny0 - 1);
  zeta = ((double)k) / (nz - 1);

  /* powers built by repeated multiplication: identical rounding to
     the original left-to-right products xi*xi*xi, xi*xi*xi*xi, ... */
  xi2   = xi * xi;      xi3   = xi2 * xi;      xi4   = xi3 * xi;
  eta2  = eta * eta;    eta3  = eta2 * eta;    eta4  = eta3 * eta;
  zeta2 = zeta * zeta;  zeta3 = zeta2 * zeta;  zeta4 = zeta3 * zeta;

  for (m = 0; m < 5; m++) {
    u000ijk[m] = ce[m][0]
      + ce[m][1]  * xi
      + ce[m][2]  * eta
      + ce[m][3]  * zeta
      + ce[m][4]  * xi2
      + ce[m][5]  * eta2
      + ce[m][6]  * zeta2
      + ce[m][7]  * xi3
      + ce[m][8]  * eta3
      + ce[m][9]  * zeta3
      + ce[m][10] * xi4
      + ce[m][11] * eta4
      + ce[m][12] * zeta4;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacld(int k) {
/*--------------------------------------------------------------------
c compute the lower triangular part of the jacobian matrix
c
c For the given k-plane this fills, at every interior (i,j):
c   d[i][j][5][5] - the 5x5 block diagonal, built from u[i][j][k]
c   a[i][j][5][5] - sub-diagonal block coupling to u[i][j][k-1]
c   b[i][j][5][5] - sub-diagonal block coupling to u[i][j-1][k]
c   c[i][j][5][5] - sub-diagonal block coupling to u[i-1][j][k]
c NOTE(review): presumably consumed by the lower-triangular (forward)
c SSOR sweep - confirm against the caller outside this chunk.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
/* nowait: no barrier needed here since each (i,j) column is written
   independently; schedule(static) keeps the iteration-to-thread
   mapping fixed */
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
/*--------------------------------------------------------------------
c form the block diagonal
--------------------------------------------------------------------*/
/* tmp1..tmp3: successive powers of 1/u[i][j][k][0] */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c form the first block sub-diagonal (built from the k-1 neighbour)
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k-1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tz1 * dz1;
a[i][j][0][1] = 0.0;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = - dt * tz2;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = - dt * tz2
* ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] );
a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
a[i][j][1][2] = 0.0;
a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 );
a[i][j][1][4] = 0.0;
a[i][j][2][0] = - dt * tz2
* ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] );
a[i][j][2][1] = 0.0;
a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 );
a[i][j][2][4] = 0.0;
a[i][j][3][0] = - dt * tz2
* ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] );
a[i][j][3][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1] * tmp1 ) );
a[i][j][3][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2] * tmp1 ) );
a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
a[i][j][3][4] = - dt * tz2 * C2;
a[i][j][4][0] = - dt * tz2
* ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2
- C1 * ( u[i][j][k-1][4] * tmp1 ) )
* ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1])
- ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2])
- ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3])
- c1345 * tmp2 * u[i][j][k-1][4] );
a[i][j][4][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1];
a[i][j][4][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2];
a[i][j][4][3] = - dt * tz2
* ( C1 * ( u[i][j][k-1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k-1][1]*u[i][j][k-1][1]
+ u[i][j][k-1][2]*u[i][j][k-1][2]
+ 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3];
a[i][j][4][4] = - dt * tz2
* ( C1 * ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
/*--------------------------------------------------------------------
c form the second block sub-diagonal (built from the j-1 neighbour)
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j-1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = - dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = - dt * ty2
* ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] );
b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = - dt * ty2
* ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] );
b[i][j][2][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1] * tmp1 ) );
b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][3] * tmp1 ) );
b[i][j][2][4] = - dt * ty2 * C2;
b[i][j][3][0] = - dt * ty2
* ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 );
b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = - dt * ty2
* ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2
- C1 * ( u[i][j-1][k][4] * tmp1 ) )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1]))
- ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2]))
- ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3]))
- c1345*tmp2*u[i][j-1][k][4] );
b[i][j][4][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1];
b[i][j][4][2] = - dt * ty2
* ( C1 * ( u[i][j-1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j-1][k][1]*u[i][j-1][k][1]
+ 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2]
+ u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2];
b[i][j][4][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3];
b[i][j][4][4] = - dt * ty2
* ( C1 * ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c form the third block sub-diagonal (built from the i-1 neighbour)
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i-1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tx1 * dx1;
c[i][j][0][1] = - dt * tx2;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = 0.0;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] );
c[i][j][1][1] = - dt * tx2
* ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
c[i][j][1][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2] * tmp1 ) );
c[i][j][1][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3] * tmp1 ) );
c[i][j][1][4] = - dt * tx2 * C2;
c[i][j][2][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] );
c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 );
c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
c[i][j][2][3] = 0.0;
c[i][j][2][4] = 0.0;
c[i][j][3][0] = - dt * tx2
* ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] );
c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 );
c[i][j][3][2] = 0.0;
c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
c[i][j][3][4] = 0.0;
c[i][j][4][0] = - dt * tx2
* ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2
- C1 * ( u[i-1][j][k][4] * tmp1 ) )
* ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) )
- c1345 * tmp2 * u[i-1][j][k][4] );
c[i][j][4][1] = - dt * tx2
* ( C1 * ( u[i-1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1]
+ u[i-1][j][k][2]*u[i-1][j][k][2]
+ u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1];
c[i][j][4][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2];
c[i][j][4][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3];
c[i][j][4][4] = - dt * tx2
* ( C1 * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacu(int k) {
/*--------------------------------------------------------------------
c compute the upper triangular part of the jacobian matrix
c
c For the given k-plane this fills, at every interior (i,j):
c   d[i][j][5][5] - the 5x5 block diagonal, built from u[i][j][k]
c   a[i][j][5][5] - block coupling to u[i+1][j][k]
c   b[i][j][5][5] - block coupling to u[i][j+1][k]
c   c[i][j][5][5] - block coupling to u[i][j][k+1]
c (the original "sub-diagonal" comments are kept below, but note the
c neighbours here are the +1 / super-diagonal ones)
c NOTE(review): presumably consumed by the upper-triangular (backward)
c SSOR sweep - confirm against the caller outside this chunk.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
#pragma omp for nowait schedule(static)
/* under OpenMP the (i,j) sweep runs in decreasing index order;
   NOTE(review): presumably to match the backward-substitution
   traversal of the upper-triangular solve - confirm */
#if defined(_OPENMP)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
#else
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
#endif
/*--------------------------------------------------------------------
c form the block diagonal
--------------------------------------------------------------------*/
/* tmp1..tmp3: successive powers of 1/u[i][j][k][0] */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c form the first block sub-diagonal (built from the i+1 neighbour)
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i+1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tx1 * dx1;
a[i][j][0][1] = dt * tx2;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = 0.0;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] );
a[i][j][1][1] = dt * tx2
* ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
a[i][j][1][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2] * tmp1 ) );
a[i][j][1][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3] * tmp1 ) );
a[i][j][1][4] = dt * tx2 * C2 ;
a[i][j][2][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] );
a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 );
a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
a[i][j][2][3] = 0.0;
a[i][j][2][4] = 0.0;
a[i][j][3][0] = dt * tx2
* ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] );
a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 );
a[i][j][3][2] = 0.0;
a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
a[i][j][3][4] = 0.0;
a[i][j][4][0] = dt * tx2
* ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2
- C1 * ( u[i+1][j][k][4] * tmp1 ) )
* ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) )
- c1345 * tmp2 * u[i+1][j][k][4] );
a[i][j][4][1] = dt * tx2
* ( C1 * ( u[i+1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1]
+ u[i+1][j][k][2]*u[i+1][j][k][2]
+ u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1];
a[i][j][4][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2];
a[i][j][4][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3];
a[i][j][4][4] = dt * tx2
* ( C1 * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
/*--------------------------------------------------------------------
c form the second block sub-diagonal (built from the j+1 neighbour)
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j+1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = dt * ty2
* ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] );
b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = dt * ty2
* ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] );
b[i][j][2][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1] * tmp1 ) );
b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][3] * tmp1 ) );
b[i][j][2][4] = dt * ty2 * C2;
b[i][j][3][0] = dt * ty2
* ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 );
b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = dt * ty2
* ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2
- C1 * ( u[i][j+1][k][4] * tmp1 ) )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) )
- ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) )
- ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) )
- c1345*tmp2*u[i][j+1][k][4] );
b[i][j][4][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1];
b[i][j][4][2] = dt * ty2
* ( C1 * ( u[i][j+1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j+1][k][1]*u[i][j+1][k][1]
+ 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2]
+ u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2];
b[i][j][4][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3];
b[i][j][4][4] = dt * ty2
* ( C1 * ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c form the third block sub-diagonal (built from the k+1 neighbour)
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k+1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tz1 * dz1;
c[i][j][0][1] = 0.0;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = dt * tz2;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = dt * tz2
* ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] );
c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
c[i][j][1][2] = 0.0;
c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 );
c[i][j][1][4] = 0.0;
c[i][j][2][0] = dt * tz2
* ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] );
c[i][j][2][1] = 0.0;
c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 );
c[i][j][2][4] = 0.0;
c[i][j][3][0] = dt * tz2
* ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] );
c[i][j][3][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1] * tmp1 ) );
c[i][j][3][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2] * tmp1 ) );
c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
c[i][j][3][4] = dt * tz2 * C2;
c[i][j][4][0] = dt * tz2
* ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2
- C1 * ( u[i][j][k+1][4] * tmp1 ) )
* ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) )
- ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) )
- c1345 * tmp2 * u[i][j][k+1][4] );
c[i][j][4][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1];
c[i][j][4][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2];
c[i][j][4][3] = dt * tz2
* ( C1 * ( u[i][j][k+1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k+1][1]*u[i][j][k+1][1]
+ u[i][j][k+1][2]*u[i][j][k+1][2]
+ 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];
c[i][j][4][4] = dt * tz2
* ( C1 * ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void l2norm (int nx0, int ny0, int nz0,
                    int ist, int iend,
                    int jst, int jend,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
                    double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
                    double sum[5]) {
/*--------------------------------------------------------------------
c  Compute the RMS norm of the 5-component field v over the interior
c  sub-domain [ist..iend] x [jst..jend] x [1..nz0-2], one value per
c  component, returned in sum[5].
--------------------------------------------------------------------*/
#pragma omp parallel
  {
    int i, j, k, m;
    /* per-thread partial sums, one per flow variable */
    double part[5] = {0.0, 0.0, 0.0, 0.0, 0.0};

    /* one thread clears the shared accumulators; the implicit barrier
       at the end of single orders this before any accumulation */
#pragma omp single
    for (m = 0; m < 5; m++) {
      sum[m] = 0.0;
    }

    /* nowait is safe: each thread only touches its private part[] */
#pragma omp for nowait
    for (i = ist; i <= iend; i++) {
      for (j = jst; j <= jend; j++) {
        for (k = 1; k <= nz0-2; k++) {
          for (m = 0; m < 5; m++) {
            part[m] += v[i][j][k][m] * v[i][j][k][m];
          }
        }
      }
    }

    /* hand-rolled reduction: fold each thread's partials into sum[] */
#pragma omp critical
    {
      for (m = 0; m < 5; m++) {
        sum[m] += part[m];
      }
    }
#pragma omp barrier

    /* finish the RMS: divide by the interior point count, take sqrt */
#pragma omp single
    for (m = 0; m < 5; m++) {
      sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c pintgr: compute a surface integral of the pressure-like quantity
c   C2 * ( energy - 0.5*|momentum|^2/density )
c over three faces of the sub-domain bounded by ii1..ii2, ji1..ji2,
c ki1..ki2, and store the combined result in the global frc.
--------------------------------------------------------------------*/
static void pintgr(void) {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
  int i, j, k;
  int ibeg, ifin, ifin1;
  int jbeg, jfin, jfin1;
  /* NOTE(review): iglob/jglob are local==global leftovers from the
     distributed-memory (MPI) version of this code; here iglob == i
     and jglob == j throughout. */
  int iglob, iglob1, iglob2;
  int jglob, jglob1, jglob2;
  double phi1[ISIZ2+2][ISIZ3+2];	/* phi1(0:isiz2+1,0:isiz3+1) */
  double phi2[ISIZ2+2][ISIZ3+2];	/* phi2(0:isiz2+1,0:isiz3+1) */
  double frc1, frc2, frc3;

/*--------------------------------------------------------------------
c set up the sub-domains for integeration in each processor
c (clips the integration bounds ii1..ii2 / ji1..ji2 against the
c local index range; with a single process this reduces to
c ibeg=ii1, ifin=ii2, jbeg=ji1, jfin=ji2)
--------------------------------------------------------------------*/
  ibeg = nx;
  ifin = 0;
  iglob1 = -1;
  iglob2 = nx-1;
  if (iglob1 >= ii1 && iglob2 < ii2+nx) ibeg = 0;
  if (iglob1 >= ii1-nx && iglob2 <= ii2) ifin = nx;
  if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
  if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
  jbeg = ny;
  jfin = -1;
  jglob1 = 0;
  jglob2 = ny-1;
  if (jglob1 >= ji1 && jglob2 < ji2+ny) jbeg = 0;
  if (jglob1 > ji1-ny && jglob2 <= ji2) jfin = ny;
  if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
  if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
  /* upper corners are shared between trapezoids; shrink by one so the
     trapezoidal sums below do not step past the face */
  ifin1 = ifin;
  jfin1 = jfin;
  if (ifin1 == ii2) ifin1 = ifin -1;
  if (jfin1 == ji2) jfin1 = jfin -1;

/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

  /* face integrand on the bottom (k = ki1) and top (k = ki2) planes */
  for (i = ibeg; i <= ifin; i++) {
    iglob = i;
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      k = ki1;
      phi1[i][j] = C2*(  u[i][j][k][4]
			 - 0.50 * (  pow2(u[i][j][k][1])
				     + pow2(u[i][j][k][2])
				     + pow2(u[i][j][k][3]) )
			 / u[i][j][k][0] );
      k = ki2;
      phi2[i][j] = C2*(  u[i][j][k][4]
			 - 0.50 * (  pow2(u[i][j][k][1])
				     + pow2(u[i][j][k][2])
				     + pow2(u[i][j][k][3]) )
			 / u[i][j][k][0] );
    }
  }

  /* 2-D trapezoidal rule over the i-j faces */
  frc1 = 0.0;
  for (i = ibeg; i <= ifin1; i++) {
    for (j = jbeg; j <= jfin1; j++) {
      frc1 = frc1 + (  phi1[i][j]
		       + phi1[i+1][j]
		       + phi1[i][j+1]
		       + phi1[i+1][j+1]
		       + phi2[i][j]
		       + phi2[i+1][j]
		       + phi2[i][j+1]
		       + phi2[i+1][j+1] );
    }
  }
  frc1 = dxi * deta * frc1;

/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

  /* face integrand on the j = jbeg and j = jfin planes (indexed by
     i and k; the first phi index now carries i) */
  jglob = jbeg;
  if (jglob == ji1) {
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
      for (k = ki1; k <= ki2; k++) {
	phi1[i][k] = C2*(  u[i][jbeg][k][4]
			   - 0.50 * (  pow2(u[i][jbeg][k][1])
				       + pow2(u[i][jbeg][k][2])
				       + pow2(u[i][jbeg][k][3]) )
			   / u[i][jbeg][k][0] );
      }
    }
  }
  jglob = jfin;
  if (jglob == ji2) {
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
      for (k = ki1; k <= ki2; k++) {
	phi2[i][k] = C2*(  u[i][jfin][k][4]
			   - 0.50 * (  pow2(u[i][jfin][k][1])
				       + pow2(u[i][jfin][k][2])
				       + pow2(u[i][jfin][k][3]) )
			   / u[i][jfin][k][0] );
      }
    }
  }

  /* trapezoidal rule over the i-k faces */
  frc2 = 0.0;
  for (i = ibeg; i <= ifin1; i++) {
    for (k = ki1; k <= ki2-1; k++) {
      frc2 = frc2 + (  phi1[i][k]
		       + phi1[i+1][k]
		       + phi1[i][k+1]
		       + phi1[i+1][k+1]
		       + phi2[i][k]
		       + phi2[i+1][k]
		       + phi2[i][k+1]
		       + phi2[i+1][k+1] );
    }
  }
  frc2 = dxi * dzeta * frc2;

/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

  /* face integrand on the i = ibeg and i = ifin planes (indexed by
     j and k; the first phi index now carries j) */
  iglob = ibeg;
  if (iglob == ii1) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
	phi1[j][k] = C2*(  u[ibeg][j][k][4]
			   - 0.50 * (  pow2(u[ibeg][j][k][1])
				       + pow2(u[ibeg][j][k][2])
				       + pow2(u[ibeg][j][k][3]) )
			   / u[ibeg][j][k][0] );
      }
    }
  }
  iglob = ifin;
  if (iglob == ii2) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
	phi2[j][k] = C2*(  u[ifin][j][k][4]
			   - 0.50 * (  pow2(u[ifin][j][k][1])
				       + pow2(u[ifin][j][k][2])
				       + pow2(u[ifin][j][k][3]) )
			   / u[ifin][j][k][0] );
      }
    }
  }

  /* trapezoidal rule over the j-k faces */
  frc3 = 0.0;
  for (j = jbeg; j <= jfin1; j++) {
    for (k = ki1; k <= ki2-1; k++) {
      frc3 = frc3 + (  phi1[j][k]
		       + phi1[j+1][k]
		       + phi1[j][k+1]
		       + phi1[j+1][k+1]
		       + phi2[j][k]
		       + phi2[j+1][k]
		       + phi2[j][k+1]
		       + phi2[j+1][k+1] );
    }
  }
  frc3 = deta * dzeta * frc3;

  /* combined surface integral; 0.25 is the trapezoidal corner weight */
  frc = 0.25 * ( frc1 + frc2 + frc3 );
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c read_input: read the run control parameters from "inputlu.data"
c if that file exists, otherwise use compiled-in defaults.
c
c Sets the globals:
c   ipr    = 1 for detailed progress output
c   inorm  = how often the norm is printed (once every inorm iterations)
c   itmax  = number of pseudo time steps
c   dt     = time step
c   omega  = over-relaxation factor for SSOR
c   tolrsd = steady state residual tolerance levels
c   nx0, ny0, nz0 = number of grid points in x, y, z directions
c
c Exits with status 1 on a malformed input file or an invalid
c problem size.
--------------------------------------------------------------------*/

/* Consume the rest of the current input line.  Unlike the original
   while(fgetc(fp) != '\n') idiom, this cannot loop forever when the
   file is truncated (fgetc keeps returning EOF, never '\n'). */
static void skip_line(FILE *fp) {
  int c;
  do {
    c = fgetc(fp);
  } while (c != '\n' && c != EOF);
}

static void read_input(void) {
  FILE *fp;

  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
	 " - LU Benchmark\n\n");
  fp = fopen("inputlu.data", "r");
  if (fp != NULL) {
    int ok = 1;  /* tracks that every fscanf conversion succeeded */
    printf(" Reading from input file inputlu.data\n");
    /* the file interleaves two comment lines before each value line
       and the value line itself is consumed up to its newline */
    skip_line(fp); skip_line(fp);
    ok = ok && fscanf(fp, "%d%d", &ipr, &inorm) == 2;
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    ok = ok && fscanf(fp, "%d", &itmax) == 1;
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    ok = ok && fscanf(fp, "%lf", &dt) == 1;
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    ok = ok && fscanf(fp, "%lf", &omega) == 1;
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    ok = ok && fscanf(fp, "%lf%lf%lf%lf%lf",
		      &tolrsd[0], &tolrsd[1], &tolrsd[2],
		      &tolrsd[3], &tolrsd[4]) == 5;
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    ok = ok && fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0) == 3;
    skip_line(fp);
    fclose(fp);
    if (!ok) {
      /* previously a parse failure silently left garbage in the
	 globals; fail loudly instead */
      printf(" ERROR: input file inputlu.data is malformed\n");
      exit(1);
    }
  } else {
    ipr = IPR_DEFAULT;
    inorm = INORM_DEFAULT;
    itmax = ITMAX_DEFAULT;
    dt = DT_DEFAULT;
    omega = OMEGA_DEFAULT;
    tolrsd[0] = TOLRSD1_DEF;
    tolrsd[1] = TOLRSD2_DEF;
    tolrsd[2] = TOLRSD3_DEF;
    tolrsd[3] = TOLRSD4_DEF;
    tolrsd[4] = TOLRSD5_DEF;
    nx0 = ISIZ1;
    ny0 = ISIZ2;
    nz0 = ISIZ3;
  }
/*--------------------------------------------------------------------
c check problem size
--------------------------------------------------------------------*/
  /* message text kept from the original benchmark even though the
     check itself rejects sizes below 4 */
  if ( nx0 < 4 || ny0 < 4 || nz0 < 4 ) {
    printf(" PROBLEM SIZE IS TOO SMALL - \n"
	   " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
    exit(1);
  }
  if ( nx0 > ISIZ1 || ny0 > ISIZ2 || nz0 > ISIZ3 ) {
    printf(" PROBLEM SIZE IS TOO LARGE - \n"
	   " NX, NY AND NZ SHOULD BE EQUAL TO \n"
	   " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
    exit(1);
  }
  printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
  printf(" Iterations: %3d\n", itmax);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c rhs: compute the right hand side (steady-state residual) rsd.
c Starting from the negated forcing term -frct, this adds, for each
c of the three coordinate directions in turn:
c   - convective flux differences (second-order central, tx2/ty2/tz2),
c   - viscous flux differences plus second-difference terms, and
c   - fourth-order artificial dissipation (dssp) with one-sided
c     stencils near the boundaries.
c The flux array is reused within each direction: first for the
c convective fluxes, then overwritten with the viscous fluxes.
--------------------------------------------------------------------*/
static void rhs(void) {
#pragma omp parallel
  {
/*--------------------------------------------------------------------
c compute the right hand sides
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
    int i, j, k, m;
    int L1, L2;
    int ist1, iend1;
    int jst1, jend1;
    double q;
    double u21, u31, u41;                       /* velocity components */
    double tmp;                                 /* 1/density */
    double u21i, u31i, u41i, u51i;
    double u21j, u31j, u41j, u51j;
    double u21k, u31k, u41k, u51k;
    double u21im1, u31im1, u41im1, u51im1;      /* values at i-1 / j-1 / k-1 */
    double u21jm1, u31jm1, u41jm1, u51jm1;
    double u21km1, u31km1, u41km1, u51km1;

    /* start from the negated forcing term everywhere */
#pragma omp for
    for (i = 0; i <= nx-1; i++) {
      for (j = 0; j <= ny-1; j++) {
	for (k = 0; k <= nz-1; k++) {
	  for (m = 0; m < 5; m++) {
	    rsd[i][j][k][m] = - frct[i][j][k][m];
	  }
	}
      }
    }

/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
    L1 = 0;
    L2 = nx-1;

    /* convective (Euler) fluxes in xi */
#pragma omp for
    for (i = L1; i <= L2; i++) {
      for (j = jst; j <= jend; j++) {
	for (k = 1; k <= nz - 2; k++) {
	  flux[i][j][k][0] = u[i][j][k][1];
	  u21 = u[i][j][k][1] / u[i][j][k][0];
	  q = 0.50 * (  u[i][j][k][1] * u[i][j][k][1]
			+ u[i][j][k][2] * u[i][j][k][2]
			+ u[i][j][k][3] * u[i][j][k][3] )
	    / u[i][j][k][0];
	  flux[i][j][k][1] = u[i][j][k][1] * u21 + C2 *
	    ( u[i][j][k][4] - q );
	  flux[i][j][k][2] = u[i][j][k][2] * u21;
	  flux[i][j][k][3] = u[i][j][k][3] * u21;
	  flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;
	}
      }
    }

#pragma omp for
    for (j = jst; j <= jend; j++) {
      for (k = 1; k <= nz - 2; k++) {
	/* central difference of the convective fluxes */
	for (i = ist; i <= iend; i++) {
	  for (m = 0; m < 5; m++) {
	    rsd[i][j][k][m] =  rsd[i][j][k][m]
	      - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
	  }
	}
	/* viscous fluxes (overwrite flux); defined on i-1/2 faces */
	L2 = nx-1;
	for (i = ist; i <= L2; i++) {
	  tmp = 1.0 / u[i][j][k][0];
	  u21i = tmp * u[i][j][k][1];
	  u31i = tmp * u[i][j][k][2];
	  u41i = tmp * u[i][j][k][3];
	  u51i = tmp * u[i][j][k][4];
	  tmp = 1.0 / u[i-1][j][k][0];
	  u21im1 = tmp * u[i-1][j][k][1];
	  u31im1 = tmp * u[i-1][j][k][2];
	  u41im1 = tmp * u[i-1][j][k][3];
	  u51im1 = tmp * u[i-1][j][k][4];
	  flux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);
	  flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
	  flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
	  flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
	    * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )
		      - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )
	    + (1.0/6.0)
	    * tx3 * ( pow2(u21i) - pow2(u21im1) )
	    + C1 * C5 * tx3 * ( u51i - u51im1 );
	}
	/* add viscous flux differences and second-difference terms */
	for (i = ist; i <= iend; i++) {
	  rsd[i][j][k][0] = rsd[i][j][k][0]
	    + dx1 * tx1 * (            u[i-1][j][k][0]
				       - 2.0 * u[i][j][k][0]
				       +           u[i+1][j][k][0] );
	  rsd[i][j][k][1] = rsd[i][j][k][1]
	    + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
	    + dx2 * tx1 * (            u[i-1][j][k][1]
				       - 2.0 * u[i][j][k][1]
				       +           u[i+1][j][k][1] );
	  rsd[i][j][k][2] = rsd[i][j][k][2]
	    + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
	    + dx3 * tx1 * (            u[i-1][j][k][2]
				       - 2.0 * u[i][j][k][2]
				       +           u[i+1][j][k][2] );
	  rsd[i][j][k][3] = rsd[i][j][k][3]
	    + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
	    + dx4 * tx1 * (            u[i-1][j][k][3]
				       - 2.0 * u[i][j][k][3]
				       +           u[i+1][j][k][3] );
	  rsd[i][j][k][4] = rsd[i][j][k][4]
	    + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
	    + dx5 * tx1 * (            u[i-1][j][k][4]
				       - 2.0 * u[i][j][k][4]
				       +           u[i+1][j][k][4] );
	}

/*--------------------------------------------------------------------
c Fourth-order dissipation
c (one-sided stencils at i=1,2 near the low boundary)
--------------------------------------------------------------------*/
	for (m = 0; m < 5; m++) {
	  rsd[1][j][k][m] = rsd[1][j][k][m]
	    - dssp * ( + 5.0 * u[1][j][k][m]
		       - 4.0 * u[2][j][k][m]
		       +       u[3][j][k][m] );
	  rsd[2][j][k][m] = rsd[2][j][k][m]
	    - dssp * ( - 4.0 * u[1][j][k][m]
		       + 6.0 * u[2][j][k][m]
		       - 4.0 * u[3][j][k][m]
		       +       u[4][j][k][m] );
	}

	/* full five-point stencil in the interior */
	ist1 = 3;
	iend1 = nx - 4;
	for (i = ist1; i <= iend1; i++) {
	  for (m = 0; m < 5; m++) {
	    rsd[i][j][k][m] = rsd[i][j][k][m]
	      - dssp * (            u[i-2][j][k][m]
				    - 4.0 * u[i-1][j][k][m]
				    + 6.0 * u[i][j][k][m]
				    - 4.0 * u[i+1][j][k][m]
				    +           u[i+2][j][k][m] );
	  }
	}

	/* one-sided stencils at i=nx-3, nx-2 near the high boundary */
	for (m = 0; m < 5; m++) {
	  rsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]
	    - dssp * (             u[nx-5][j][k][m]
				   - 4.0 * u[nx-4][j][k][m]
				   + 6.0 * u[nx-3][j][k][m]
				   - 4.0 * u[nx-2][j][k][m]  );
	  rsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]
	    - dssp * (             u[nx-4][j][k][m]
				   - 4.0 * u[nx-3][j][k][m]
				   + 5.0 * u[nx-2][j][k][m] );
	}
      }
    }

/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
    L1 = 0;
    L2 = ny-1;

    /* convective fluxes in eta */
#pragma omp for
    for (i = ist; i <= iend; i++) {
      for (j = L1; j <= L2; j++) {
	for (k = 1; k <= nz - 2; k++) {
	  flux[i][j][k][0] = u[i][j][k][2];
	  u31 = u[i][j][k][2] / u[i][j][k][0];
	  q = 0.50 * (  u[i][j][k][1] * u[i][j][k][1]
			+ u[i][j][k][2] * u[i][j][k][2]
			+ u[i][j][k][3] * u[i][j][k][3] )
	    / u[i][j][k][0];
	  flux[i][j][k][1] = u[i][j][k][1] * u31;
	  flux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);
	  flux[i][j][k][3] = u[i][j][k][3] * u31;
	  flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;
	}
      }
    }

#pragma omp for
    for (i = ist; i <= iend; i++) {
      for (k = 1; k <= nz - 2; k++) {
	/* central difference of the convective fluxes */
	for (j = jst; j <= jend; j++) {
	  for (m = 0; m < 5; m++) {
	    rsd[i][j][k][m] =  rsd[i][j][k][m]
	      - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
	  }
	}
	/* viscous fluxes (overwrite flux) */
	L2 = ny-1;
	for (j = jst; j <= L2; j++) {
	  tmp = 1.0 / u[i][j][k][0];
	  u21j = tmp * u[i][j][k][1];
	  u31j = tmp * u[i][j][k][2];
	  u41j = tmp * u[i][j][k][3];
	  u51j = tmp * u[i][j][k][4];
	  tmp = 1.0 / u[i][j-1][k][0];
	  u21jm1 = tmp * u[i][j-1][k][1];
	  u31jm1 = tmp * u[i][j-1][k][2];
	  u41jm1 = tmp * u[i][j-1][k][3];
	  u51jm1 = tmp * u[i][j-1][k][4];
	  flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
	  flux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);
	  flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
	  flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
	    * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )
		      - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )
	    + (1.0/6.0)
	    * ty3 * ( pow2(u31j) - pow2(u31jm1) )
	    + C1 * C5 * ty3 * ( u51j - u51jm1 );
	}
	/* add viscous flux differences and second-difference terms */
	for (j = jst; j <= jend; j++) {
	  rsd[i][j][k][0] = rsd[i][j][k][0]
	    + dy1 * ty1 * (            u[i][j-1][k][0]
				       - 2.0 * u[i][j][k][0]
				       +           u[i][j+1][k][0] );
	  rsd[i][j][k][1] = rsd[i][j][k][1]
	    + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
	    + dy2 * ty1 * (            u[i][j-1][k][1]
				       - 2.0 * u[i][j][k][1]
				       +           u[i][j+1][k][1] );
	  rsd[i][j][k][2] = rsd[i][j][k][2]
	    + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
	    + dy3 * ty1 * (            u[i][j-1][k][2]
				       - 2.0 * u[i][j][k][2]
				       +           u[i][j+1][k][2] );
	  rsd[i][j][k][3] = rsd[i][j][k][3]
	    + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
	    + dy4 * ty1 * (            u[i][j-1][k][3]
				       - 2.0 * u[i][j][k][3]
				       +           u[i][j+1][k][3] );
	  rsd[i][j][k][4] = rsd[i][j][k][4]
	    + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
	    + dy5 * ty1 * (            u[i][j-1][k][4]
				       - 2.0 * u[i][j][k][4]
				       +           u[i][j+1][k][4] );
	}

/*--------------------------------------------------------------------
c fourth-order dissipation
c (one-sided stencils at j=1,2 near the low boundary)
--------------------------------------------------------------------*/
	for (m = 0; m < 5; m++) {
	  rsd[i][1][k][m] = rsd[i][1][k][m]
	    - dssp * ( + 5.0 * u[i][1][k][m]
		       - 4.0 * u[i][2][k][m]
		       +       u[i][3][k][m] );
	  rsd[i][2][k][m] = rsd[i][2][k][m]
	    - dssp * ( - 4.0 * u[i][1][k][m]
		       + 6.0 * u[i][2][k][m]
		       - 4.0 * u[i][3][k][m]
		       +       u[i][4][k][m] );
	}

	/* full five-point stencil in the interior */
	jst1 = 3;
	jend1 = ny - 4;
	for (j = jst1; j <= jend1; j++) {
	  for (m = 0; m < 5; m++) {
	    rsd[i][j][k][m] = rsd[i][j][k][m]
	      - dssp * (            u[i][j-2][k][m]
				    - 4.0 * u[i][j-1][k][m]
				    + 6.0 * u[i][j][k][m]
				    - 4.0 * u[i][j+1][k][m]
				    +           u[i][j+2][k][m] );
	  }
	}

	/* one-sided stencils at j=ny-3, ny-2 near the high boundary */
	for (m = 0; m < 5; m++) {
	  rsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]
	    - dssp * (             u[i][ny-5][k][m]
				   - 4.0 * u[i][ny-4][k][m]
				   + 6.0 * u[i][ny-3][k][m]
				   - 4.0 * u[i][ny-2][k][m]  );
	  rsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]
	    - dssp * (             u[i][ny-4][k][m]
				   - 4.0 * u[i][ny-3][k][m]
				   + 5.0 * u[i][ny-2][k][m] );
	}
      }
    }

/*--------------------------------------------------------------------
c zeta-direction flux differences
c (the k direction is innermost, so convective and viscous phases
c fit in a single i,j loop nest here)
--------------------------------------------------------------------*/
#pragma omp for
    for (i = ist; i <= iend; i++) {
      for (j = jst; j <= jend; j++) {
	/* convective fluxes in zeta */
	for (k = 0; k <= nz-1; k++) {
	  flux[i][j][k][0] = u[i][j][k][3];
	  u41 = u[i][j][k][3] / u[i][j][k][0];
	  q = 0.50 * (  u[i][j][k][1] * u[i][j][k][1]
			+ u[i][j][k][2] * u[i][j][k][2]
			+ u[i][j][k][3] * u[i][j][k][3] )
	    / u[i][j][k][0];
	  flux[i][j][k][1] = u[i][j][k][1] * u41;
	  flux[i][j][k][2] = u[i][j][k][2] * u41;
	  flux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);
	  flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;
	}
	/* central difference of the convective fluxes */
	for (k = 1; k <= nz - 2; k++) {
	  for (m = 0; m < 5; m++) {
	    rsd[i][j][k][m] =  rsd[i][j][k][m]
	      - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
	  }
	}
	/* viscous fluxes (overwrite flux) */
	for (k = 1; k <= nz-1; k++) {
	  tmp = 1.0 / u[i][j][k][0];
	  u21k = tmp * u[i][j][k][1];
	  u31k = tmp * u[i][j][k][2];
	  u41k = tmp * u[i][j][k][3];
	  u51k = tmp * u[i][j][k][4];
	  tmp = 1.0 / u[i][j][k-1][0];
	  u21km1 = tmp * u[i][j][k-1][1];
	  u31km1 = tmp * u[i][j][k-1][2];
	  u41km1 = tmp * u[i][j][k-1][3];
	  u51km1 = tmp * u[i][j][k-1][4];
	  flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
	  flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
	  flux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);
	  flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
	    * tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )
		      - ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )
	    + (1.0/6.0)
	    * tz3 * ( pow2(u41k) - pow2(u41km1) )
	    + C1 * C5 * tz3 * ( u51k - u51km1 );
	}
	/* add viscous flux differences and second-difference terms */
	for (k = 1; k <= nz - 2; k++) {
	  rsd[i][j][k][0] = rsd[i][j][k][0]
	    + dz1 * tz1 * (            u[i][j][k-1][0]
				       - 2.0 * u[i][j][k][0]
				       +           u[i][j][k+1][0] );
	  rsd[i][j][k][1] = rsd[i][j][k][1]
	    + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
	    + dz2 * tz1 * (            u[i][j][k-1][1]
				       - 2.0 * u[i][j][k][1]
				       +           u[i][j][k+1][1] );
	  rsd[i][j][k][2] = rsd[i][j][k][2]
	    + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
	    + dz3 * tz1 * (            u[i][j][k-1][2]
				       - 2.0 * u[i][j][k][2]
				       +           u[i][j][k+1][2] );
	  rsd[i][j][k][3] = rsd[i][j][k][3]
	    + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
	    + dz4 * tz1 * (            u[i][j][k-1][3]
				       - 2.0 * u[i][j][k][3]
				       +           u[i][j][k+1][3] );
	  rsd[i][j][k][4] = rsd[i][j][k][4]
	    + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
	    + dz5 * tz1 * (            u[i][j][k-1][4]
				       - 2.0 * u[i][j][k][4]
				       +           u[i][j][k+1][4] );
	}

/*--------------------------------------------------------------------
c fourth-order dissipation
c (one-sided stencils at k=1,2 near the low boundary)
--------------------------------------------------------------------*/
	for (m = 0; m < 5; m++) {
	  rsd[i][j][1][m] = rsd[i][j][1][m]
	    - dssp * ( + 5.0 * u[i][j][1][m]
		       - 4.0 * u[i][j][2][m]
		       +       u[i][j][3][m] );
	  rsd[i][j][2][m] = rsd[i][j][2][m]
	    - dssp * ( - 4.0 * u[i][j][1][m]
		       + 6.0 * u[i][j][2][m]
		       - 4.0 * u[i][j][3][m]
		       +       u[i][j][4][m] );
	}

	/* full five-point stencil in the interior */
	for (k = 3; k <= nz - 4; k++) {
	  for (m = 0; m < 5; m++) {
	    rsd[i][j][k][m] = rsd[i][j][k][m]
	      - dssp * (            u[i][j][k-2][m]
				    - 4.0 * u[i][j][k-1][m]
				    + 6.0 * u[i][j][k][m]
				    - 4.0 * u[i][j][k+1][m]
				    +           u[i][j][k+2][m] );
	  }
	}

	/* one-sided stencils at k=nz-3, nz-2 near the high boundary */
	for (m = 0; m < 5; m++) {
	  rsd[i][j][nz-3][m] = rsd[i][j][nz-3][m]
	    - dssp * (             u[i][j][nz-5][m]
				   - 4.0 * u[i][j][nz-4][m]
				   + 6.0 * u[i][j][nz-3][m]
				   - 4.0 * u[i][j][nz-2][m]  );
	  rsd[i][j][nz-2][m] = rsd[i][j][nz-2][m]
	    - dssp * (             u[i][j][nz-4][m]
				   - 4.0 * u[i][j][nz-3][m]
				   + 5.0 * u[i][j][nz-2][m] );
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setbv(void) {
/*--------------------------------------------------------------------
c set the boundary values of dependent variables: every point on each
c of the six faces of the box is loaded with the exact solution.
c (The iglob/jglob locals of the original were identities left over
c from the distributed-memory version and have been inlined.)
--------------------------------------------------------------------*/
#pragma omp parallel
  {
    int i, j, k;

/*--------------------------------------------------------------------
c set the dependent variable values along the top and bottom faces
c (k = 0 and k = nz-1)
--------------------------------------------------------------------*/
#pragma omp for
    for (i = 0; i < nx; i++) {
      for (j = 0; j < ny; j++) {
        exact( i, j, 0, &u[i][j][0][0] );
        exact( i, j, nz-1, &u[i][j][nz-1][0] );
      }
    }

/*--------------------------------------------------------------------
c set the dependent variable values along north and south faces
c (j = 0 and j = ny-1; ny0-1 is the global index of the far plane)
--------------------------------------------------------------------*/
#pragma omp for
    for (i = 0; i < nx; i++) {
      for (k = 0; k < nz; k++) {
        exact( i, 0, k, &u[i][0][k][0] );
      }
    }

#pragma omp for
    for (i = 0; i < nx; i++) {
      for (k = 0; k < nz; k++) {
        exact( i, ny0-1, k, &u[i][ny-1][k][0] );
      }
    }

/*--------------------------------------------------------------------
c set the dependent variable values along east and west faces
c (i = 0 and i = nx-1; nx0-1 is the global index of the far plane)
--------------------------------------------------------------------*/
#pragma omp for
    for (j = 0; j < ny; j++) {
      for (k = 0; k < nz; k++) {
        exact( 0, j, k, &u[0][j][k][0] );
      }
    }

#pragma omp for
    for (j = 0; j < ny; j++) {
      for (k = 0; k < nz; k++) {
        exact( nx0-1, j, k, &u[nx-1][j][k][0] );
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setcoeff(void) {
/*--------------------------------------------------------------------
c set up coefficients: grid spacings and difference factors, the
c bounds of the integration sub-domain, diffusion coefficients, the
c fourth-difference dissipation constant, and the coefficient table
c of the exact solution (one row per pde).
--------------------------------------------------------------------*/
  int i, j;

  /* coefficients of the exact solution; row m holds the 13
     coefficients of the (m+1)-th pde */
  static const double ce_init[5][13] = {
    { 2.0, 0.0, 0.0, 4.0, 5.0, 3.0, 5.0e-01,
      2.0e-02, 1.0e-02, 3.0e-02, 5.0e-01, 4.0e-01, 3.0e-01 },
    { 1.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0,
      1.0e-02, 3.0e-02, 2.0e-02, 4.0e-01, 3.0e-01, 5.0e-01 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0,
      4.0e-02, 3.0e-02, 5.0e-02, 3.0e-01, 5.0e-01, 4.0e-01 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0,
      3.0e-02, 5.0e-02, 4.0e-02, 2.0e-01, 1.0e-01, 3.0e-01 },
    { 5.0, 4.0, 3.0, 2.0, 1.0e-01, 4.0e-01, 3.0e-01,
      5.0e-02, 4.0e-02, 3.0e-02, 1.0e-01, 3.0e-01, 2.0e-01 }
  };

  /* grid spacings */
  dxi   = 1.0 / ( nx0 - 1 );
  deta  = 1.0 / ( ny0 - 1 );
  dzeta = 1.0 / ( nz0 - 1 );

  /* second-difference, central-difference and first-difference
     factors in each direction */
  tx1 = 1.0 / ( dxi * dxi );
  tx2 = 1.0 / ( 2.0 * dxi );
  tx3 = 1.0 / dxi;
  ty1 = 1.0 / ( deta * deta );
  ty2 = 1.0 / ( 2.0 * deta );
  ty3 = 1.0 / deta;
  tz1 = 1.0 / ( dzeta * dzeta );
  tz2 = 1.0 / ( 2.0 * dzeta );
  tz3 = 1.0 / dzeta;

  /* bounds of the sub-domain used by the surface integral (pintgr) */
  ii1 = 1;
  ii2 = nx0 - 2;
  ji1 = 1;
  ji2 = ny0 - 3;
  ki1 = 2;
  ki2 = nz0 - 2;

/*--------------------------------------------------------------------
c diffusion coefficients (identical within each direction)
--------------------------------------------------------------------*/
  dx1 = dx2 = dx3 = dx4 = dx5 = 0.75;
  dy1 = dy2 = dy3 = dy4 = dy5 = 0.75;
  dz1 = dz2 = dz3 = dz4 = dz5 = 1.00;

/*--------------------------------------------------------------------
c fourth difference dissipation
--------------------------------------------------------------------*/
  dssp = ( max (dx1, max(dy1, dz1) ) ) / 4.0;

/*--------------------------------------------------------------------
c coefficients of the exact solution, copied from the table above
--------------------------------------------------------------------*/
  for (i = 0; i < 5; i++) {
    for (j = 0; j < 13; j++) {
      ce[i][j] = ce_init[i][j];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setiv(void) {
/*--------------------------------------------------------------------
c set the initial values of independent variables based on tri-linear
c interpolation of boundary values in the computational space.
c Boundary planes themselves (i, j at 0 or far end; k at 0 or nz-1)
c are skipped - they are set by setbv.
--------------------------------------------------------------------*/
#pragma omp parallel
  {
    int i, j, k, m;
    double xi, eta, zeta;
    double pxi, peta, pzeta;
    /* exact solution on the six bounding faces for the current point */
    double ue_1jk[5], ue_nx0jk[5], ue_i1k[5],
           ue_iny0k[5], ue_ij1[5], ue_ijnz[5];

#pragma omp for
    for (j = 0; j < ny; j++) {
      if (j == 0 || j == ny0-1) continue;     /* boundary plane */
      eta = ( (double) j ) / (ny0-1);
      for (k = 1; k < nz - 1; k++) {
        zeta = ((double)k) / (nz-1);
        for (i = 0; i < nx; i++) {
          if (i == 0 || i == nx0-1) continue; /* boundary plane */
          xi = ( (double) i ) / (nx0-1);

          /* boundary values on the two faces in each direction */
          exact (0, j, k, ue_1jk);
          exact (nx0-1, j, k, ue_nx0jk);
          exact (i, 0, k, ue_i1k);
          exact (i, ny0-1, k, ue_iny0k);
          exact (i, j, 0, ue_ij1);
          exact (i, j, nz-1, ue_ijnz);

          /* tri-linear blend of the three 1-D interpolants */
          for (m = 0; m < 5; m++) {
            pxi   = ( 1.0 - xi ) * ue_1jk[m] + xi * ue_nx0jk[m];
            peta  = ( 1.0 - eta ) * ue_i1k[m] + eta * ue_iny0k[m];
            pzeta = ( 1.0 - zeta ) * ue_ij1[m] + zeta * ue_ijnz[m];
            u[i][j][k][m] = pxi + peta + pzeta
              - pxi * peta - peta * pzeta - pzeta * pxi
              + pxi * peta * pzeta;
          }
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c ssor: driver for the pseudo-time stepping SSOR iterations.
c Each step scales the residual by dt, performs the lower- and
c upper-triangular sweeps (jacld/blts and jacu/buts), updates u,
c recomputes the residual, and periodically checks the norms
c against the tolerance levels.
--------------------------------------------------------------------*/
static void ssor(void) {
/*--------------------------------------------------------------------
c to perform pseudo-time stepping SSOR iterations
c for five nonlinear pde s.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int istep;
  double tmp;                    /* SSOR relaxation factor 1/(w(2-w)) */
  double delunm[5], tv[ISIZ1][ISIZ2][5];

/*--------------------------------------------------------------------
c begin pseudo-time stepping iterations
--------------------------------------------------------------------*/
  tmp = 1.0 / ( omega * ( 2.0 - omega ) ) ;

/*--------------------------------------------------------------------
c initialize a,b,c,d to zero (guarantees that page tables have been
c formed, if applicable on given architecture, before timestepping).
--------------------------------------------------------------------*/
#pragma omp parallel private(i,j,k,m)
  {
#pragma omp for
    for (i = 0; i < ISIZ1; i++) {
      for (j = 0; j < ISIZ2; j++) {
	for (k = 0; k < 5; k++) {
	  for (m = 0; m < 5; m++) {
	    a[i][j][k][m] = 0.0;
	    b[i][j][k][m] = 0.0;
	    c[i][j][k][m] = 0.0;
	    d[i][j][k][m] = 0.0;
	  }
	}
      }
    }
  }

/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
  rhs();

/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
  l2norm( nx0, ny0, nz0,
	  ist, iend, jst, jend,
	  rsd, rsdnm );

  timer_clear(1);
  timer_start(1);

/*--------------------------------------------------------------------
c the timestep loop
--------------------------------------------------------------------*/
  for (istep = 1; istep <= itmax; istep++) {

    if (istep%20 == 0 || istep == itmax || istep == 1) {
      /* NOTE(review): this omp master is outside any parallel region
	 (the loop body is serial here) - it binds to the encountering
	 thread, so the printf executes once as intended */
#pragma omp master
      printf(" Time step %4d\n", istep);
    }

    /* NOTE(review): istep is listed private although it is not
       referenced inside this parallel region; harmless, but a
       private copy would be uninitialized if it were read */
#pragma omp parallel private(istep,i,j,k,m)
    {
/*--------------------------------------------------------------------
c perform SSOR iteration
--------------------------------------------------------------------*/
      /* scale the residual by the time step */
#pragma omp for
      for (i = ist; i <= iend; i++) {
	for (j = jst; j <= jend; j++) {
	  for (k = 1; k <= nz - 2; k++) {
	    for (m = 0; m < 5; m++) {
	      rsd[i][j][k][m] = dt * rsd[i][j][k][m];
	    }
	  }
	}
      }

      /* forward sweep: the k loop is serial (each plane depends on
	 the previous one); parallelism is inside jacld/blts */
      for (k = 1; k <= nz - 2; k++) {
/*--------------------------------------------------------------------
c form the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
	jacld(k);

/*--------------------------------------------------------------------
c perform the lower triangular solution
--------------------------------------------------------------------*/
	blts(nx, ny, nz, k,
	     omega,
	     rsd,
	     a, b, c, d,
	     ist, iend, jst, jend,
	     nx0, ny0 );
      }

#pragma omp barrier

      /* backward sweep, k descending, also serial in k */
      for (k = nz - 2; k >= 1; k--) {
/*--------------------------------------------------------------------
c form the strictly upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
	jacu(k);

/*--------------------------------------------------------------------
c perform the upper triangular solution
--------------------------------------------------------------------*/
	buts(nx, ny, nz, k,
	     omega,
	     rsd, tv,
	     d, a, b, c,
	     ist, iend, jst, jend,
	     nx0, ny0 );
      }

#pragma omp barrier

/*--------------------------------------------------------------------
c update the variables
--------------------------------------------------------------------*/
#pragma omp for
      for (i = ist; i <= iend; i++) {
	for (j = jst; j <= jend; j++) {
	  for (k = 1; k <= nz-2; k++) {
	    for (m = 0; m < 5; m++) {
	      u[i][j][k][m] = u[i][j][k][m]
		+ tmp * rsd[i][j][k][m];
	    }
	  }
	}
      }
    } /* end parallel */

/*--------------------------------------------------------------------
c compute the max-norms of newton iteration corrections
--------------------------------------------------------------------*/
    if ( istep % inorm == 0 ) {
      l2norm( nx0, ny0, nz0,
	      ist, iend, jst, jend,
	      rsd, delunm );
    }

/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
    rhs();

/*--------------------------------------------------------------------
c compute the max-norms of newton iteration residuals
--------------------------------------------------------------------*/
    if ( ( istep % inorm == 0 ) ||
	 ( istep == itmax ) ) {
      l2norm( nx0, ny0, nz0,
	      ist, iend, jst, jend,
	      rsd, rsdnm );
    }

/*--------------------------------------------------------------------
c check the newton-iteration residuals against the tolerance levels
--------------------------------------------------------------------*/
    if ( ( rsdnm[0] < tolrsd[0] ) &&
	 ( rsdnm[1] < tolrsd[1] ) &&
	 ( rsdnm[2] < tolrsd[2] ) &&
	 ( rsdnm[3] < tolrsd[3] ) &&
	 ( rsdnm[4] < tolrsd[4] ) ) {
      /* NOTE(review): converging early terminates the whole program
	 with status 1 (nonzero) - skips timer_stop and verification;
	 inherited from the original benchmark, confirm intent */
      exit(1);
    }
  }

  timer_stop(1);
  maxtime= timer_read(1);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(double xcr[5], double xce[5], double xci,
char *class, boolean *verified) {
/*--------------------------------------------------------------------
c verification routine
c Compares the computed RMS residual norms (xcr), solution-error
c norms (xce) and surface integral (xci) against hard-coded reference
c values for the known benchmark classes (S, W, A, B, C), selected by
c grid size (nx0, ny0, nz0) and iteration count (itmax).
c Outputs: *class is the matched class letter ('U' = unknown grid),
c *verified is TRUE only if dt matches the reference and every
c relative difference is within epsilon.
--------------------------------------------------------------------*/
double xcrref[5],xceref[5],xciref,
xcrdif[5],xcedif[5],xcidif,
epsilon, dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level
--------------------------------------------------------------------*/
epsilon = 1.0e-08;
/* Assume unknown class / successful verification until proven otherwise. */
*class = 'U';
*verified = TRUE;
/* Default references of 1.0 avoid division by zero in the relative
   differences below when no class matches. */
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
xciref = 1.0;
if ( nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) {
*class = 'S';
dtref = 5.0e-1;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xcrref[0] = 1.6196343210976702e-02;
xcrref[1] = 2.1976745164821318e-03;
xcrref[2] = 1.5179927653399185e-03;
xcrref[3] = 1.5029584435994323e-03;
xcrref[4] = 3.4264073155896461e-02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xceref[0] = 6.4223319957960924e-04;
xceref[1] = 8.4144342047347926e-05;
xceref[2] = 5.8588269616485186e-05;
xceref[3] = 5.8474222595157350e-05;
xceref[4] = 1.3103347914111294e-03;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xciref = 7.8418928865937083;
} else if ( nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) {
*class = 'W'; /* SPEC95fp size */
dtref = 1.5e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (33x33x33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xcrref[0] = 0.1236511638192e+02;
xcrref[1] = 0.1317228477799e+01;
xcrref[2] = 0.2550120713095e+01;
xcrref[3] = 0.2326187750252e+01;
xcrref[4] = 0.2826799444189e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (33X33X33) grid,
--------------------------------------------------------------------*/
xceref[0] = 0.4867877144216;
xceref[1] = 0.5064652880982e-01;
xceref[2] = 0.9281818101960e-01;
xceref[3] = 0.8570126542733e-01;
xceref[4] = 0.1084277417792e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (33X33X33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xciref = 0.1161399311023e+02;
} else if ( nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) {
*class = 'A';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xcrref[0] = 7.7902107606689367e+02;
xcrref[1] = 6.3402765259692870e+01;
xcrref[2] = 1.9499249727292479e+02;
xcrref[3] = 1.7845301160418537e+02;
xcrref[4] = 1.8384760349464247e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xceref[0] = 2.9964085685471943e+01;
xceref[1] = 2.8194576365003349;
xceref[2] = 7.3473412698774742;
xceref[3] = 6.7139225687777051;
xceref[4] = 7.0715315688392578e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xciref = 2.6030925604886277e+01;
} else if ( nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) {
*class = 'B';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xcrref[0] = 3.5532672969982736e+03;
xcrref[1] = 2.6214750795310692e+02;
xcrref[2] = 8.8333721850952190e+02;
xcrref[3] = 7.7812774739425265e+02;
xcrref[4] = 7.3087969592545314e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (102X102X102)
c grid, after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xceref[0] = 1.1401176380212709e+02;
xceref[1] = 8.1098963655421574;
xceref[2] = 2.8480597317698308e+01;
xceref[3] = 2.5905394567832939e+01;
xceref[4] = 2.6054907504857413e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xciref = 4.7887162703308227e+01;
} else if ( nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) {
*class = 'C';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xcrref[0] = 1.03766980323537846e+04;
xcrref[1] = 8.92212458801008552e+02;
xcrref[2] = 2.56238814582660871e+03;
xcrref[3] = 2.19194343857831427e+03;
xcrref[4] = 1.78078057261061185e+04;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (162X162X162)
c grid, after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xceref[0] = 2.15986399716949279e+02;
xceref[1] = 1.55789559239863600e+01;
xceref[2] = 5.41318863077207766e+01;
xceref[3] = 4.82262643154045421e+01;
xceref[4] = 4.55902910043250358e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
xciref = 6.66404553572181300e+01;
} else {
/* Grid size / itmax matches no known class: nothing to compare against. */
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 64X64X64 or 102X102X102 or 162X162X162
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
c (Relative differences; the 1.0 defaults above keep these finite for
c the unknown-class case.)
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
xcidif = fabs((xci - xciref)/xciref);
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
c Note: dtref is only read inside this branch, i.e. when a class was
c matched above, so it is always initialized here.
--------------------------------------------------------------------*/
if (*class != 'U') {
printf("\n Verification being performed for class %1c\n", *class);
printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
if (fabs(dt-dtref) > epsilon) {
/* Wrong time step: demote to unknown so only raw values are printed. */
*verified = FALSE;
*class = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d %20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
}
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d %20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
}
}
if (*class != 'U') {
printf(" Comparison of surface integral\n");
} else {
printf(" Surface integral\n");
}
if (*class == 'U') {
printf(" %20.13e\n", xci);
} else if (xcidif > epsilon) {
*verified = FALSE;
printf(" FAILURE: %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
} else {
printf(" %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
}
if (*class == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
|
GB_unop__erfc_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__erfc_fp64_fp64
// op(A') function: GB_unop_tran__erfc_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = erfc (aij)
// Type/operator configuration macros consumed by the kernels below and by
// the included template file GB_unop_transpose.c.
// A matrix entry type
#define GB_ATYPE \
double
// C matrix entry type
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// access an entry of C
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = erfc (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = erfc (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ERFC || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = erfc (Ax [p]) for all entries of A; Cx and Ax may be aliased.
// If Ab is non-NULL, A is a bitmap matrix and only entries whose bitmap
// bit is set are computed (C->b is assumed to already hold a copy of A->b).
// Returns GrB_NO_VALUE when this operator/type combination is disabled.
GrB_Info GB_unop_apply__erfc_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab != NULL)
{
// bitmap case: skip entries not present in the bitmap
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (Ab [p])
{
double t = Ax [p] ; // cast double -> double
Cx [p] = erfc (t) ;
}
}
}
else
{
// sparse/hyper/full case: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double t = Ax [p] ; // cast double -> double
Cx [p] = erfc (t) ;
}
#endif
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = erfc (A'): transpose A, typecast, and apply the unary operator.
// The actual transpose kernel is supplied by the GB_unop_transpose.c
// template, which is specialized via the GB_* macros defined above.
// Returns GrB_NO_VALUE when this operator/type combination is disabled.
GrB_Info GB_unop_tran__erfc_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
trmm_x_sky_u_hi_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    // y = beta*y + alpha*U*x, where U is a unit-diagonal upper-triangular
    // matrix stored in skyline format (mat->pointers delimits each column's
    // stored run, which ends at the diagonal). x and y are dense with
    // leading dimensions ldx and ldy.
    ALPHA_INT thread_count = alpha_get_thread_num();

    // First pass: scale every entry of y by beta.
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
    for (ALPHA_INT r = 0; r < mat->rows; r++)
    {
        for (ALPHA_INT c = 0; c < columns; c++)
        {
            alpha_mul(y[index2(r, c, ldy)], y[index2(r, c, ldy)], beta);
        }
    }

    // Second pass: accumulate alpha*U*x, one dense column of x/y per
    // (parallel) iteration, so threads never write the same y entry.
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
    for (ALPHA_INT c = 0; c < columns; c++)
    {
        for (ALPHA_INT col = 0; col < mat->cols; col++)
        {
            const ALPHA_INT first = mat->pointers[col];
            const ALPHA_INT last = mat->pointers[col + 1];
            for (ALPHA_INT ai = first; ai < last; ai++)
            {
                // The stored run of column `col` ends at the diagonal,
                // so the row of element ai counts back from it.
                const ALPHA_INT row = col + 1 - (last - ai);
                if (col > row)
                {
                    // Strictly-upper element: use the stored value.
                    ALPHA_Number t;
                    alpha_mul(t, alpha, mat->values[ai]);
                    alpha_madde(y[index2(row, c, ldy)], t, x[index2(col, c, ldx)]);
                }
                else if (col == row)
                {
                    // Unit diagonal: stored value ignored, contribute alpha*x.
                    alpha_madde(y[index2(row, c, ldy)], alpha, x[index2(col, c, ldx)]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
/* DDS_HEADER flags (DDSD_*): which header fields contain valid data */
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
/* DDS_PIXELFORMAT flags (DDPF_*): how pixel data is encoded */
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
/* FourCC codes for the DXT compressed formats ("DXT1", "DXT3", "DXT5"
   as little-endian 32-bit integers) */
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
/* Surface capability flags (DDSCAPS_*, DDSCAPS2_*) */
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
/* Fallback for toolchains lacking SIZE_MAX */
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
/* In-memory image of the DDS_PIXELFORMAT header structure */
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
/* In-memory image of the DDS_HEADER structure (surface description) */
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
/* The four colors of a decoded DXT color block (two endpoints plus two
   interpolated values), one component array per channel */
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
/* One candidate endpoint pair for single-color DXT compression:
   start/end quantized levels and the resulting rounding error */
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
/* Lookup entry used when compressing a block of a single color; the two
   sources correspond to the two interpolation modes of a DXT block */
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
/* Signature of a per-format DDS decoder (DXT1/DXT3/DXT5/uncompressed) */
typedef MagickBooleanType
DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *);
typedef MagickBooleanType
DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *);
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Single-colour lookup tables indexed per RGB565 channel: index 0 and 2
  (red, blue) use the 5-bit table, index 1 (green) uses the 6-bit table,
  matching the 5/6/5 channel widths used throughout this coder.
*/
static const DDSSingleColourLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};
/*
  Macros
*/
/* Extract the raw 5/6/5-bit channel fields from an RGB565 pixel. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
/* Expand a 5- or 6-bit field to 8 bits by replicating its high bits. */
#define C565_red(x)   ((C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ((C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x)  ((C565_b(x) << 3 | C565_b(x) >> 2))
/* Halve a mipmap dimension without letting it drop below 1. */
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
/*
  Widen [min,max] so it spans at least `steps' levels.  Wrapped in
  do/while(0) so the multi-statement macro is safe in unbraced if/else
  bodies; callers invoke it as a statement with a trailing semicolon.
*/
#define FixRange(min, max, steps) \
  do \
  { \
    if (min > max) \
      min = max; \
    if ((ssize_t) max - min < steps) \
      max = MagickMin(min + steps, 255); \
    if ((ssize_t) max - min < steps) \
      min = MagickMax(0, (ssize_t) max - steps); \
  } while(0)
/* Fully parenthesized so Dot() composes safely inside larger expressions. */
#define Dot(left, right) ((left.x*right.x) + (left.y*right.y) + (left.z*right.z))
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
  Forward declarations.
*/
/* Decoders for each supported DDS encoding, plus shared encoder helpers. */
static MagickBooleanType
  ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
    DDSVector4 *, DDSVector4 *, unsigned char *, size_t),
  ReadDDSInfo(Image *,DDSInfo *),
  ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *,
    const MagickBooleanType,ExceptionInfo *),
  ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *,
    const MagickBooleanType,ExceptionInfo *),
  SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *),
  WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t,
    const MagickBooleanType,const MagickBooleanType,const MagickBooleanType,
    ExceptionInfo *);

/* Encoder-side helpers that emit DDS structures and compressed blocks. */
static void
  RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
  WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
  WriteFourCC(Image *,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  /* Component-wise sum: *destination = left + right. */
  DDSVector4
    sum;

  sum.x=left.x+right.x;
  sum.y=left.y+right.y;
  sum.z=left.z+right.z;
  sum.w=left.w+right.w;
  *destination=sum;
}
static inline void VectorClamp(DDSVector4 *value)
{
  /* Clamp every component of the vector into [0,1] in place. */
  DDSVector4
    v;

  v=(*value);
  v.x=MagickMin(1.0f,MagickMax(0.0f,v.x));
  v.y=MagickMin(1.0f,MagickMax(0.0f,v.y));
  v.z=MagickMin(1.0f,MagickMax(0.0f,v.z));
  v.w=MagickMin(1.0f,MagickMax(0.0f,v.w));
  *value=v;
}
static inline void VectorClamp3(DDSVector3 *value)
{
  /* Clamp the three components of the vector into [0,1] in place. */
  DDSVector3
    v;

  v=(*value);
  v.x=MagickMin(1.0f,MagickMax(0.0f,v.x));
  v.y=MagickMin(1.0f,MagickMax(0.0f,v.y));
  v.z=MagickMin(1.0f,MagickMax(0.0f,v.z));
  *value=v;
}
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  /* Copy x/y/z into the 3-vector, discarding the w component. */
  DDSVector3
    v;

  v.x=source.x;
  v.y=source.y;
  v.z=source.z;
  *destination=v;
}
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  /* Struct assignment copies all four components at once. */
  *destination=source;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  /* Component-wise *destination = c - a*b. */
  DDSVector4
    result;

  result.x=c.x-(a.x*b.x);
  result.y=c.y-(a.y*b.y);
  result.z=c.z-(a.z*b.z);
  result.w=c.w-(a.w*b.w);
  *destination=result;
}
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  /* Component-wise (Hadamard) product of two 4-vectors. */
  DDSVector4
    product;

  product.x=left.x*right.x;
  product.y=left.y*right.y;
  product.z=left.z*right.z;
  product.w=left.w*right.w;
  *destination=product;
}
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  /* Component-wise (Hadamard) product of two 3-vectors. */
  DDSVector3
    product;

  product.x=left.x*right.x;
  product.y=left.y*right.y;
  product.z=left.z*right.z;
  *destination=product;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  /* Component-wise *destination = a*b + c. */
  DDSVector4
    result;

  result.x=(a.x*b.x)+c.x;
  result.y=(a.y*b.y)+c.y;
  result.z=(a.z*b.z)+c.z;
  result.w=(a.w*b.w)+c.w;
  *destination=result;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  /* Component-wise *destination = a*b + c (3-vector variant). */
  DDSVector3
    result;

  result.x=(a.x*b.x)+c.x;
  result.y=(a.y*b.y)+c.y;
  result.z=(a.z*b.z)+c.z;
  *destination=result;
}
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  /*
    Component-wise reciprocal.  No zero check: callers are expected to
    pass non-zero components (a zero yields IEEE infinity).
  */
  DDSVector4
    result;

  result.x=1.0f/value.x;
  result.y=1.0f/value.y;
  result.z=1.0f/value.z;
  result.w=1.0f/value.w;
  *destination=result;
}
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  /* Component-wise difference: *destination = left - right. */
  DDSVector4
    difference;

  difference.x=left.x-right.x;
  difference.y=left.y-right.y;
  difference.z=left.z-right.z;
  difference.w=left.w-right.w;
  *destination=difference;
}
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  /* Component-wise difference of two 3-vectors. */
  DDSVector3
    difference;

  difference.x=left.x-right.x;
  difference.y=left.y-right.y;
  difference.z=left.z-right.z;
  *destination=difference;
}
static inline void VectorTruncate(DDSVector4 *value)
{
  /* Round each component toward zero (truncate), in place. */
  DDSVector4
    v;

  v=(*value);
  v.x=v.x > 0.0f ? floor(v.x) : ceil(v.x);
  v.y=v.y > 0.0f ? floor(v.y) : ceil(v.y);
  v.z=v.z > 0.0f ? floor(v.z) : ceil(v.z);
  v.w=v.w > 0.0f ? floor(v.w) : ceil(v.w);
  *value=v;
}
static inline void VectorTruncate3(DDSVector3 *value)
{
  /* Round each component toward zero (truncate), in place. */
  DDSVector3
    v;

  v=(*value);
  v.x=v.x > 0.0f ? floor(v.x) : ceil(v.x);
  v.y=v.y > 0.0f ? floor(v.y) : ceil(v.y);
  v.z=v.z > 0.0f ? floor(v.z) : ceil(v.z);
  *value=v;
}
/*
  Expand the two RGB565 endpoint colours (c0, c1) of a DXT block into the
  four palette entries addressed by the block's 2-bit selectors.  When
  c0 > c1 -- or alpha is ignored, as DXT3/DXT5 colour blocks require --
  the block is in 4-colour mode and entries 2 and 3 are the 2/3-1/3
  interpolants; otherwise 3-colour mode applies: entry 2 is the midpoint
  and entry 3 is transparent black (a[3]=255 flags the transparent texel
  for the caller; a[] is otherwise 0).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  /* Expand the 5/6/5-bit endpoints to full 8-bit channels. */
  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      /* 4-colour mode: two interpolated entries at 1/3 and 2/3. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      /* 3-colour mode: midpoint plus a transparent black entry. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}
/*
  Quantize 16 alpha samples against the 8-entry DXT5 alpha palette built
  from [min,max] and return the accumulated squared error.  Entries 6 and
  7 default to 0 and 255 (5-interpolant mode) but are overwritten when
  steps is 7 (7-interpolant mode).  Samples of -1 are "don't care" and map
  to index 0 with no error contribution.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  size_t
    total_error;

  ssize_t
    i;

  unsigned char
    codes[8];

  /* Build the candidate palette: endpoints, fixed 0/255, interpolants. */
  codes[0]=(unsigned char) min;
  codes[1]=(unsigned char) max;
  codes[6]=0;
  codes[7]=255;
  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1]=(unsigned char) (((steps-i)*min+i*max)/steps);

  total_error=0;
  for (i=0; i < 16; i++)
  {
    size_t
      best,
      best_index,
      current,
      j;

    if (alphas[i] == -1)
      {
        indices[i]=0;
        continue;
      }
    current=(size_t) alphas[i];
    best=SIZE_MAX;
    best_index=0;
    for (j=0; j < 8; j++)
    {
      size_t
        diff;

      /*
        Unsigned subtraction may wrap, but squaring makes the result equal
        to the true squared distance (|diff| <= 255, so no overflow).
      */
      diff=current-(size_t) codes[j];
      diff*=diff;
      if (diff < best)
        {
          best=diff;
          best_index=j;
        }
    }
    indices[i]=(unsigned char) best_index;
    total_error+=best;
  }
  return(total_error);
}
/*
  Cluster-fit colour compression for a DXT block: the points are ordered
  along the principal axis and every contiguous 3-way partition of that
  ordering (clusters weighted 0, 1/3, 2/3, 1) is evaluated; the partition
  with the lowest metric-weighted error determines the start/end endpoints
  and the per-texel indices.  Up to 8 orderings are tried; iteration stops
  early when an ordering repeats or no improvement is found.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
  unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /*
    Constant weights; the .w lanes carry the squared 1/3 and 2/3 factors
    used by the least-squares solve below.
  */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);

  /* RGB565 quantization grid (5/6/5 bits per channel). */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;

  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;

  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);

  for (iterationIndex = 0;;)
  {
    /* i/j/k split the ordered points into the four weight clusters. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic,1) \
      num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);

      VectorInit(part1,0.0f);

      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }

        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 = remaining points (weight-1 cluster). */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);

          /* Closed-form least-squares solve for endpoints a and b. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);

          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);

          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);

          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);

          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);

          /* Snap both endpoints onto the RGB565 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);

          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);

          /* Metric-weighted squared error of this partition. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);

          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);

          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);

          error = e2.x + e2.y + e2.z;

          if (error < bestError)
            {
              /*
                Double-checked update: the unguarded read of bestError is a
                fast filter; the critical section re-checks before writing.
                NOTE(review): the outer read races with concurrent writers
                (benign here -- worst case is an extra critical entry) --
                confirm against upstream if strict C conformance matters.
              */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }

          if (k == count)
            break;

          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }

        if (j == count)
          break;

        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }

    /* Stop when the last pass found no improvement. */
    if (bestIteration != iterationIndex)
      break;

    iterationIndex++;
    if (iterationIndex == 8)
      break;

    /* Re-order the points along the current best axis and retry. */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
        iterationIndex) == MagickFalse)
      break;
  }

  /* Translate the winning partition into per-texel palette indices. */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  RemapIndices(map,unordered,indices);
}
/*
  Range-fit colour compression: project all points onto the principal
  axis, take the extremes as the two endpoints, snap them onto the RGB565
  grid, then assign every point to the nearest of the four interpolated
  palette colours under the given perceptual metric.  Faster but coarser
  than CompressClusterFit.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  /* RGB565 quantization grid (5/6/5 bits per channel). */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      /* Find the points with minimal/maximal projection on the axis. */
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Snap both endpoints onto the RGB565 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* The four palette colours: endpoints plus 1/3 and 2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  /* Assign each point to its nearest palette colour under the metric. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}
/*
  Pick the optimal DXT endpoints for a single-colour block from the
  precomputed per-channel lookup tables.  Each table entry offers two
  candidate source blocks (i=0,1); the candidate with the lower summed
  squared channel error wins.  maxError starts at SIZE_MAX so the first
  candidate always initializes start/end; a later candidate with an
  *equal* error also replaces it (the comparison is strict >).
*/
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    if (error > maxError)
      continue;

    /* Normalize table endpoints back into [0,1] per 5/6/5 channel. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}
/*
  Estimate the principal axis of the colour cloud by power iteration on
  the symmetric 3x3 covariance matrix (supplied packed as 6 unique
  elements).  Eight iterations of v <- normalize(M*v), normalizing by the
  largest component each step, approximate the dominant eigenvector.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  register ssize_t
    i;

  /* Unpack the 6 covariance values into the full symmetric matrix rows. */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v (matrix-vector product, row by row). */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Normalize by the largest component to avoid overflow/underflow. */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}
/*
  Compute the weighted 3x3 covariance of the colour points (each point's
  w component is its weight) about their weighted centroid.  Only the 6
  unique elements of the symmetric matrix are stored, in the order
  xx, xy, xz, yy, yz, zz.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    mean;

  float
    weight_sum;

  size_t
    k;

  /* Weighted centroid of the points. */
  weight_sum=0.0f;
  VectorInit3(mean,0.0f);
  for (k=0; k < count; k++)
  {
    weight_sum+=points[k].w;
    mean.x+=(points[k].x*points[k].w);
    mean.y+=(points[k].y*points[k].w);
    mean.z+=(points[k].z*points[k].w);
  }
  if (weight_sum > 1.192092896e-07F)
    {
      mean.x/=weight_sum;
      mean.y/=weight_sum;
      mean.z/=weight_sum;
    }

  for (k=0; k < 6; k++)
    covariance[k]=0.0f;

  /* Accumulate the weighted outer products of the centered points. */
  for (k=0; k < count; k++)
  {
    DDSVector3
      centered,
      weighted;

    centered.x=points[k].x-mean.x;
    centered.y=points[k].y-mean.y;
    centered.z=points[k].z-mean.z;

    weighted.x=points[k].w*centered.x;
    weighted.y=points[k].w*centered.y;
    weighted.z=points[k].w*centered.z;

    covariance[0]+=centered.x*weighted.x;
    covariance[1]+=centered.x*weighted.y;
    covariance[2]+=centered.x*weighted.z;
    covariance[3]+=centered.y*weighted.y;
    covariance[4]+=centered.y*weighted.z;
    covariance[5]+=centered.z*weighted.z;
  }
}
/*
  Sort the points by their projection on the given axis and record the
  permutation in order[16*iteration].  Returns MagickFalse when the new
  ordering duplicates any earlier iteration's ordering (so the caller can
  stop iterating).  On success, also fills pointsWeights with each point
  pre-multiplied by its weight (in sorted order) and xSumwSum with their
  running sum, as required by CompressClusterFit's least-squares solve.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  register ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);

  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Insertion sort of the projections, permuting o[] in lock-step. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Reject the ordering if any previous iteration produced the same one. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  /* Weighted points (in sorted order) and their running sum. */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  /* A DDS file begins with the four-byte magic "DDS " (trailing space). */
  if ((length >= 4) && (LocaleNCompare((char *) magick,"DDS ",4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read a DirectDraw Surface file: parse the header, select a decoder for
  the pixel format (uncompressed RGB/RGBA, luminance, or DXT1/3/5), then
  decode one sub-image per cubemap face or volume slice.  Returns the
  first image in the decoded list, or NULL on failure to open the blob.
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse,
  volume=MagickFalse,
  read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;

  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;

  /* Position past the 128-byte header ("DDS " magic + 124-byte header). */
  (void) SeekBlob(image, 128, SEEK_SET);

  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
   {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }

  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }

  if (volume)
    num_images = dds_info.depth;

  /* Sanity check the sub-image count against the actual blob size. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");

  /* dds:skip-mipmaps=false means "decode mipmaps as extra images". */
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;

  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }

    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        /* Ping mode: header metadata only, skip pixel decoding. */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
  }

  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
  Parse the 124-byte DDS_HEADER (and embedded 32-byte pixel-format
  structure) into dds_info.  The blob is positioned just past the "DDS "
  magic; all fields are little-endian DWORDs.  Returns MagickFalse when
  the structure sizes or the required flags are wrong.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);

  /* Check header field: dwSize must be 124 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);

  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR);   /* reserved region of 11 DWORDs */

  /* Read pixel format structure: dwSize must be 32 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR);   /* 3 reserved DWORDs */

  return MagickTrue;
}
/*
  Write one decoded 4x4 DXT1 block into the pixel cache at (x,y), clipping
  texels that fall outside the image.  Returns MagickFalse when a
  transparent texel (a[code] != 0, i.e. 3-colour-mode entry 3) is found
  but the image has no alpha channel -- the caller reacts by enabling
  alpha and re-writing the block.
  NOTE(review): the alpha value is stored via SetPixelOpacity while the
  DXT3 path uses SetPixelAlpha -- confirm the inversion is intended.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  register ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          /* Two selector bits per texel, row-major within the block. */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}
/*
  Decode the mipmap chain as additional images in the list, halving the
  dimensions each level, using the supplied per-level pixel decoder.
  Only applies to textures and cube maps that advertise DDSCAPS_MIPMAP.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      register ssize_t
        i;

      size_t
        h,
        w;

      /* Level 1 starts at half the base dimensions (clamped to 1). */
      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}
/*
  Decode one image plane of DXT1 data: for each 4x4 block, read the two
  RGB565 endpoints and 32 selector bits, expand the palette, and write
  the texels.  If a block reveals transparent texels on an image without
  an alpha channel, the alpha channel is enabled retroactively and the
  block is re-written.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);

      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha: enable the channel and re-write the block. */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the top-level DXT1 surface, then either decode the mipmap chain
  or skip past it (8 bytes per DXT1 texel block).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,8,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
}
/*
  Decode DXT3 (BC2) compressed pixel data.  Each 16-byte block carries
  64 bits of explicit 4-bit alpha values followed by a DXT1-style color
  block (two endpoint colors plus 2-bit indices).
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): a0 covers rows 0-1, a1 rows 2-3 */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels, clipping the patch at the image edges */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows)
          {
            /* 2-bit palette index selects one of four derived colors */
            code = (bits >> ((4*j+i)*2)) & 0x3;
            SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
            SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
            SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
            /*
              Extract alpha value: multiply 0..15 by 17 to get range 0..255
            */
            if (j < 2)
              alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
            else
              alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
            SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
            q+=GetPixelChannels(image);
          }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the top-level DXT3 surface, then either decode the mipmap chain
  or skip past it (16 bytes per DXT3 texel block).
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
}
/*
  Decode DXT5 (BC3) compressed pixel data.  Each 16-byte block carries
  two 8-bit alpha endpoints plus 48 bits of 3-bit alpha indices, followed
  by a DXT1-style color block.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): 2 endpoint bytes + 48 index bits */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels, clipping the patch at the image edges */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
          {
            code = (bits >> ((4*j+i)*2)) & 0x3;
            SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
            SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
            SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
            /*
              Extract alpha value: a0 > a1 selects the 8-value interpolated
              mode; otherwise 6 interpolants plus explicit 0 and 255.
            */
            alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
            if (alpha_code == 0)
              alpha = a0;
            else if (alpha_code == 1)
              alpha = a1;
            else if (a0 > a1)
              alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
            else if (alpha_code == 6)
              alpha = 0;
            else if (alpha_code == 7)
              alpha = 255;
            else
              alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
            SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
            q+=GetPixelChannels(image);
          }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the top-level DXT5 surface, then either decode the mipmap chain
  or skip past it (16 bytes per DXT5 texel block).
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
}
/*
  Read uncompressed pixel data without an alpha channel: 8-bit gray,
  16-bit RGB565, 24-bit BGR, or 32-bit BGRX (the fourth byte is ignored).
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    x, y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /* RGB565: expand each field to the 0..255 range */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* DDS stores channels in B,G,R order */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Read an uncompressed RGB surface.  Validates that a 16-bit format uses
  the RGB565 bit masks (the only 16-bit layout the pixel reader decodes),
  then reads the main surface and decodes or skips the mipmaps.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}
/*
  Read uncompressed pixel data with an alpha channel.  16-bit surfaces
  may be ARGB1555, gray+alpha (8/8), or ARGB4444 as detected from the
  bit masks; anything else is read as 32-bit BGRA.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  /* alphaBits selects the 16-bit sub-format: 1=ARGB1555, 2=gray+alpha, 4=ARGB4444 */
  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* ARGB1555: top bit is a binary alpha flag */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* high byte is alpha, low byte is gray */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* ARGB4444: each 4-bit field expanded to 0..255 */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else
        {
          /* 32-bit surfaces are stored as B,G,R,A bytes */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Read an uncompressed RGBA surface, then decode the mipmap chain or
  skip past it (4 bytes per pixel).
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,4,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  MagickInfo
    *entry;

  /* Register the generic DDS format plus the DXT1/DXT5 aliases; all three
     share the same decoder, encoder and magic detector. */
  entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  /* decoder seeks within the blob (mipmap skipping), so require seekable streams */
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
/*
  Translate the 16 texel positions of a block through 'map': an entry of
  -1 yields index 3, any other entry selects the palette index stored in
  'source' for that unique color.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  register ssize_t
    n;

  for (n = 0; n < 16; n++)
    target[n] = (map[n] == -1) ? 3 : source[map[n]];
}
/*
  Skip the mipmap images for compressed (DXTn) dds files.  Each mipmap
  level occupies ceil(w/4)*ceil(h/4) texel blocks of texel_size bytes.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      /* Raise CorruptImageError (not Warning) for consistency with
         SkipRGBMipmaps(), since we also return failure here. */
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files.
  Each mipmap level occupies w*h*pixel_size bytes.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
/*
  Write the 8-byte DXT5 alpha block: compress the 16 alpha values with
  both the 5-interpolant and the 7-interpolant encodings and emit
  whichever produced the smaller error.
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  register ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
    {
      /* The 7-interpolant encoding won: translate its indices into the
         5-slot numbering and swap the endpoints into min5/max5. */
      for (i=0; i < 16; i++)
      {
        unsigned char
          index;

        index = indices7[i];
        if( index == 0 )
          indices5[i] = 1;
        else if (index == 1)
          indices5[i] = 0;
        else
          indices5[i] = 9 - index;
      }
      min5 = max7;
      max5 = min7;
    }
  /* Two endpoint bytes, then 48 bits of 3-bit indices packed as two
     3-byte groups of eight indices each. */
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  Compress one 4x4 block of unique colors to a DXT color block: derive a
  principal axis from the weighted covariance of the points, fit endpoint
  colors along it (range fit, or the slower cluster fit when requested),
  and write the resulting endpoints plus indices.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);
  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);
  /* cluster fit is higher quality but slower; fall back to range fit */
  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
  WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    fromlist,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Pick the output encoding: DXT5 by default, DXT1 when the image has no
    alpha or when explicitly requested, raw RGB when compression is "none".
  */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }
  /* Optional quality knobs (compressed output only) */
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }
  /*
    Determine the mipmap count: either from the image list
    ("dds:mipmaps=fromlist") or computed by halving power-of-two
    dimensions down to 1x1, capped by a numeric "dds:mipmaps" value.
  */
  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;

          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }
  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      maxMipmaps=SIZE_MAX;
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);

      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }
  /* "dds:raw" suppresses the 128-byte DDS header (and with it, mipmaps) */
  option=GetImageOption(image_info,"dds:raw");
  if (IsStringTrue(option) == MagickFalse)
    WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  else
    mipmaps=0;
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);
  if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
       mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    return(MagickFalse);
  (void) CloseBlob(image);
  return(MagickTrue);
}
/*
  Write the 128-byte DDS file header: magic, 124-byte surface descriptor
  (flags, dimensions, pitch/linear size, mipmap count), pixel format, and
  capability bits.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];

  register ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;

  /* compressed surfaces report linear size, uncompressed report pitch */
  if (format == DDPF_FOURCC)
    flags=flags | DDSD_LINEARSIZE;
  else
    flags=flags | DDSD_PITCH;

  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }

  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;

  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);

  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }

  (void) WriteBlobLSBLong(image,0x00);
  /* header stores mipmap count including the main image */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
  (void) memset(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);

  (void) WriteBlobLSBLong(image,32);
  (void) WriteBlobLSBLong(image,format);

  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++) /* bitcount / masks */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00);
      /* 32-bit BGRA or 24-bit BGR channel masks */
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }

  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}
/*
  Write DXT-compressed image data.  The image is processed in 4x4 texel
  blocks: unique colors are collected (with per-color weights), alpha
  statistics are gathered for DXT5, and each block is emitted either via
  the single-color fast path or the full endpoint fit.
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  register ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  register const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* clip the patch at the right/bottom image edges */
      if (x + columns >= image->columns)
        columns = image->columns - x;

      if (y + rows >= image->rows)
        rows = image->rows - y;

      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;

      /* -1 marks texel slots with no color assigned yet */
      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }

      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;

          /* track alpha extrema for the 5- and 7-interpolant encodings */
          if (compression == FOURCC_DXT5)
            {
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }

          alphas[4*by + bx] = (size_t)alpha;
          /* normalize the color to [0,1]; w is the fit weight */
          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);

          /* merge duplicate colors, accumulating their weights */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }

          if (match != MagickFalse)
            continue;

          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }

      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);

      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);

      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}
/*
  Dispatch on the output pixel format: FourCC surfaces are written as
  DXT-compressed blocks, everything else as raw BGR(A) scanlines.
*/
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  if (pixelFormat != DDPF_FOURCC)
    {
      WriteUncompressed(image,exception);
      return;
    }
  WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
/*
  Round 'value' to the nearest integer and clamp the result into
  [0, limit].

  Bug fix: the old code converted 'value' to int and stored it in a
  size_t before the range check, so a negative input wrapped to a huge
  unsigned value and the function returned 'limit' instead of 0; the
  check 'result < 0.0f' on an unsigned could never fire.
*/
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  size_t
    result;

  if (value < 0.0f)
    return(0);
  result = (size_t) (value + 0.5f);
  if (result > limit)
    return(limit);
  return(result);
}
/*
  Pack a normalized RGB point into a 16-bit 5:6:5 color value.
*/
static inline size_t ColorTo565(const DDSVector3 point)
{
  const size_t
    red = ClampToLimit(31.0f*point.x,31),
    green = ClampToLimit(63.0f*point.y,63),
    blue = ClampToLimit(31.0f*point.z,31);

  return((red << 11) | (green << 5) | blue);
}
/*
  Write a DXT1-style color block: two 16-bit 565 endpoints followed by
  16 packed 2-bit indices.  Endpoints are ordered so that a >= b (the
  4-color opaque mode); indices are remapped to match the swap.
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  register ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  a = ColorTo565(start);
  b = ColorTo565(end);

  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }

  if( a < b )
    Swap(a,b);

  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));

  /* four indices per byte, lowest index in the low bits */
  for (i=0; i<4; i++)
  {
     ind = remapped + 4*i;
     (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
       (ind[3] << 6));
  }
}
/*
  Write the mipmap chain: either successive halvings of the main image
  (optionally resizing from the previous level when "dds:fast-mipmaps"
  is set) or, with fromlist, the images already present in the list.

  Bug fix: the call to WriteImageData() passed weightByAlpha and
  clusterFit transposed; both are MagickBooleanType, so the swap
  compiled silently but applied the options to the wrong knobs.
  Also guard against a mipmap list shorter than 'mipmaps'.
*/
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
  const size_t pixelFormat,const size_t compression,const size_t mipmaps,
  const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *mipmap_image,
    *resize_image;

  MagickBooleanType
    fast_mipmaps,
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  columns=DIV2(image->columns);
  rows=DIV2(image->rows);
  option=GetImageOption(image_info,"dds:fast-mipmaps");
  fast_mipmaps=IsStringTrue(option);
  mipmap_image=image;
  resize_image=image;
  status=MagickTrue;
  for (i=0; i < (ssize_t) mipmaps; i++)
  {
    if (fromlist == MagickFalse)
      {
        mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
          exception);
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
      }
    else
      {
        mipmap_image=mipmap_image->next;
        /* the list may be shorter than the requested mipmap count */
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
        if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
          ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
            image->filename);
      }
    /* share the output blob with the main image */
    DestroyBlob(mipmap_image);
    mipmap_image->blob=ReferenceBlob(image->blob);
    WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
    if (fromlist == MagickFalse)
      {
        if (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* resize each level from the previous one instead of the original */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }
    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);
  return(status);
}
/*
  Fast path for a block containing a single unique color: look up the
  optimal endpoints and index for that color, replicate the index over
  all 16 texel slots, and write the block.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start,
    end;

  register ssize_t
    i;

  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];

  /* convert the normalized color back to 0..255 components */
  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);

  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);

  for (i=0; i< 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}
/*
  Write raw pixel data, one scanline at a time, in the DDS byte order
  B,G,R with a trailing alpha byte when the image has an alpha channel.
*/
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  register const Quantum
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p)));
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p)));
      p+=GetPixelChannels(image);
    }
  }
}
|
sum.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
enum {
N = 10000000
};
/* Wall-clock time in seconds (microsecond resolution). */
double wtime()
{
    struct timeval now;

    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
/*
 * Recursive (divide-and-conquer) summation of v[low..high], inclusive.
 * Pairwise summation also accumulates less floating-point error than a
 * naive left-to-right loop.
 *
 * Fix: compute the midpoint as low + (high - low) / 2 so that
 * (low + high) cannot overflow int for large index ranges.
 */
double sum(double *v, int low, int high)
{
    if (low == high)
        return v[low];
    int mid = low + (high - low) / 2;
    return sum(v, low, mid) + sum(v, mid + 1, high);
}
/*
 * Recursive parallel summation of v[low..high] using OpenMP tasks.
 * 'nthreads' is a task budget halved at every level; once it reaches 1
 * the remaining range is summed serially to bound task-creation
 * overhead.
 */
double sum_omp_tasks_maxthreads(double *v, int low, int high, int nthreads)
{
    if (low == high)
        return v[low];
    if (nthreads <= 1)
        return sum(v, low, high);
    double sum_left, sum_right;
    int mid = (low + high) / 2;
    /* shared(...) is required: the task bodies write result variables
       that live in the enclosing stack frame */
    #pragma omp task shared(sum_left)
    sum_left = sum_omp_tasks_maxthreads(v, low, mid, nthreads / 2);
    #pragma omp task shared(sum_right)
    sum_right = sum_omp_tasks_maxthreads(v, mid + 1, high, nthreads - nthreads / 2);
    /* wait for both child tasks before combining their results */
    #pragma omp taskwait
    return sum_left + sum_right;
}
/*
 * Parallel entry point: open a parallel region and let exactly one
 * thread seed the task recursion; the other threads pick up the
 * generated tasks.  The task budget is the number of processors.
 */
double sum_omp(double *v, int low, int high)
{
    double s = 0;
    #pragma omp parallel
    {
        /* single: one thread creates the root task tree */
        #pragma omp single nowait
        s = sum_omp_tasks_maxthreads(v, low, high, omp_get_num_procs());
    }
    return s;
}
/*
 * Build the test vector 1..N, time the serial summation, print the
 * result and its error against the closed-form N*(N+1)/2, and return
 * the elapsed seconds.
 *
 * Fix: check the malloc result instead of dereferencing a potential
 * NULL pointer.
 */
double run_serial()
{
    double *v = malloc(sizeof(*v) * N);
    if (v == NULL) {
        fprintf(stderr, "run_serial: allocation of %d doubles failed\n", N);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < N; i++)
        v[i] = i + 1.0;
    double t = wtime();
    double res = sum(v, 0, N - 1);
    t = wtime() - t;
    printf("Result (serial): %.4f; error %.12f\n", res, fabs(res - (1.0 + N) / 2.0 * N));
    free(v);
    return t;
}
/*
 * Build the test vector 1..N, time the OpenMP task-based summation,
 * print the result and its error against the closed-form N*(N+1)/2,
 * and return the elapsed seconds.
 *
 * Fix: check the malloc result instead of dereferencing a potential
 * NULL pointer.
 */
double run_parallel()
{
    double *v = malloc(sizeof(*v) * N);
    if (v == NULL) {
        fprintf(stderr, "run_parallel: allocation of %d doubles failed\n", N);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < N; i++)
        v[i] = i + 1.0;
    double t = wtime();
    double res = sum_omp(v, 0, N - 1);
    t = wtime() - t;
    printf("Result (parallel): %.4f; error %.12f\n", res, fabs(res - (1.0 + N) / 2.0 * N));
    free(v);
    return t;
}
/* Run the serial and parallel benchmarks and report the speedup. */
int main(int argc, char **argv)
{
    (void) argc;
    (void) argv;
    printf("Recursive summation N = %d\n", N);
    const double t_serial = run_serial();
    const double t_parallel = run_parallel();
    printf("Execution time (serial): %.6f\n", t_serial);
    printf("Execution time (parallel): %.6f\n", t_parallel);
    printf("Speedup: %.2f\n", t_serial / t_parallel);
    return 0;
}
|
imginputfileconn.h | /**
* DeepDetect
* Copyright (c) 2014 Emmanuel Benazera
* Author: Emmanuel Benazera <beniz@droidnik.fr>
*
* This file is part of deepdetect.
*
* deepdetect is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* deepdetect is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with deepdetect. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef IMGINPUTFILECONN_H
#define IMGINPUTFILECONN_H
#include "inputconnectorstrategy.h"
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "ext/base64/base64.h"
#include <glog/logging.h>
#include <random>
namespace dd
{
class DDImg
{
public:
DDImg() {}
~DDImg() {}
// base64 detection
// True when 'c' is one of the characters that may appear in a base64
// payload: A-Z, a-z, 0-9, '+', '/' or the '=' padding character.
bool is_within_base64_range(char c) const
{
  return (c >= 'A' && c <= 'Z')
      || (c >= 'a' && c <= 'z')
      || (c >= '0' && c <= '9')
      || c == '+' || c == '/' || c == '=';
}
// Heuristic base64 detection: the string must have a length that is a
// multiple of four and contain only base64-alphabet characters.
bool possibly_base64(const std::string &s) const
{
  if (!is_multiple_four(s))
    return false;
  for (char c : s)
    {
      if (!is_within_base64_range(c))
        return false;
    }
  return true;
}
// Base64 output always comes in groups of four characters.
bool is_multiple_four(const std::string &s) const
{
  return s.length() % 4 == 0;
}
// decode image
void decode(const std::string &str)
{
std::vector<unsigned char> vdat(str.begin(),str.end());
cv::Mat img = cv::Mat(cv::imdecode(cv::Mat(vdat,true),_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR));
_imgs_size.push_back(std::pair<int,int>(img.rows,img.cols));
cv::Size size(_width,_height);
cv::Mat rimg;
cv::resize(img,rimg,size,0,0,CV_INTER_CUBIC);
_imgs.push_back(rimg);
}
// deserialize image, independent of format
void deserialize(std::stringstream &input)
{
size_t size = 0;
input.seekg(0,input.end);
size = input.tellg();
input.seekg(0,input.beg);
char* data = new char[size];
input.read(data, size);
std::string str(data,data+size);
delete[]data;
decode(str);
}
// data acquisition
int read_file(const std::string &fname)
{
cv::Mat img = cv::imread(fname,_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR);
if (img.empty())
{
LOG(ERROR) << "empty image";
return -1;
}
LOG(INFO) << "fname:" << fname << "\n";
_imgs_size.push_back(std::pair<int,int>(img.rows,img.cols));
cv::Size size(_width,_height);
cv::Mat rimg;
//cv::resize(img,rimg,size,0,0,CV_INTER_CUBIC);
cv::resize(img,rimg,size,0,0,CV_INTER_LINEAR); //CV_INTER_NN CV_INTER_LINEAR CV_INTER_AREA CV_INTER_CUBIC
// modify by lg 2017-10-09 start
cv::imwrite("/home/temp.bmp",rimg);
LOG(INFO) << "read " << " images\n";
// modify by lg 2017-10-09 start
_imgs.push_back(rimg);
return 0;
}
int read_db(const std::string &fname)
{
_db_fname = fname;
return 0;
}
int read_mem(const std::string &content)
{
cv::Mat timg;
_b64 = possibly_base64(content);
if (_b64)
{
std::string ccontent;
Base64::Decode(content,&ccontent);
std::stringstream sstr;
sstr << ccontent;
deserialize(sstr);
}
else
{
decode(content);
}
if (_imgs.at(0).empty())
return -1;
return 0;
}
int read_dir(const std::string &dir)
{
// list directories in dir
std::unordered_set<std::string> subdirs;
if (fileops::list_directory(dir,false,true,subdirs))
throw InputConnectorBadParamException("failed reading text subdirectories in data directory " + dir);
LOG(INFO) << "imginputfileconn: list subdirs size=" << subdirs.size();
// list files and classes
std::vector<std::pair<std::string,int>> lfiles; // labeled files
std::unordered_map<int,std::string> hcorresp; // correspondence class number / class name
if (!subdirs.empty())
{
int cl = 0;
auto uit = subdirs.begin();
while(uit!=subdirs.end())
{
std::unordered_set<std::string> subdir_files;
if (fileops::list_directory((*uit),true,false,subdir_files))
throw InputConnectorBadParamException("failed reading image data sub-directory " + (*uit));
auto fit = subdir_files.begin();
while(fit!=subdir_files.end()) // XXX: re-iterating the file is not optimal
{
lfiles.push_back(std::pair<std::string,int>((*fit),cl));
++fit;
}
++cl;
++uit;
}
}
else
{
std::unordered_set<std::string> test_files;
fileops::list_directory(dir,true,false,test_files);
auto fit = test_files.begin();
while(fit!=test_files.end())
{
lfiles.push_back(std::pair<std::string,int>((*fit),-1)); // -1 for no class
++fit;
}
}
// read images
cv::Size size(_width,_height);
_imgs.reserve(lfiles.size());
_img_files.reserve(lfiles.size());
_labels.reserve(lfiles.size());
for (std::pair<std::string,int> &p: lfiles)
{
cv::Mat img = cv::imread(p.first,_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR);
_imgs_size.push_back(std::pair<int,int>(img.rows,img.cols));
cv::Mat rimg;
cv::resize(img,rimg,size,0,0,CV_INTER_CUBIC);
_imgs.push_back(rimg);
_img_files.push_back(p.first);
if (p.second >= 0)
_labels.push_back(p.second);
if (_imgs.size() % 1000 == 0)
LOG(INFO) << "read " << _imgs.size() << " images\n";
}
return 0;
}
std::vector<cv::Mat> _imgs;
std::vector<std::string> _img_files;
std::vector<std::pair<int,int>> _imgs_size;
bool _bw = false;
bool _b64 = false;
std::vector<int> _labels;
int _width = 224;
int _height = 224;
std::string _db_fname;
};
class ImgInputFileConn : public InputConnectorStrategy
{
public:
ImgInputFileConn()
:InputConnectorStrategy(){}
ImgInputFileConn(const ImgInputFileConn &i)
:InputConnectorStrategy(i),_width(i._width),_height(i._height),_bw(i._bw),_mean(i._mean),_has_mean_scalar(i._has_mean_scalar) {}
~ImgInputFileConn() {}
void init(const APIData &ad)
{
LOG(INFO) << "init->" <<"ImgInputFileConn::init(ad)"<<"\n";
fillup_parameters(ad);
}
void fillup_parameters(const APIData &ad)
{
LOG(INFO) << "init->" <<"fillup_parameters"<<"\n";
// optional parameters.
if (ad.has("width"))
{
_width = ad.get("width").get<int>();
LOG(INFO) << "ad.has width" << "\n";
}
if (ad.has("height"))
{
LOG(INFO) << "ad.has height" << "\n";
_height = ad.get("height").get<int>();
}
if (ad.has("bw"))
{
LOG(INFO) << "ad.has bw" << "\n";
_bw = ad.get("bw").get<bool>();
}
if (ad.has("shuffle"))
{
LOG(INFO) << "ad.has shuffle" << "\n";
_shuffle = ad.get("shuffle").get<bool>();
}
if (ad.has("seed"))
{
LOG(INFO) << "ad.has seed" << "\n";
_seed = ad.get("seed").get<int>();
}
if (ad.has("test_split"))
{
LOG(INFO) << "ad.has test_split" << "\n";
_test_split = ad.get("test_split").get<double>();
}
if (ad.has("mean"))
{
LOG(INFO) << "ad.has mean" << "\n";
std::vector<int> vm = ad.get("mean").get<std::vector<int>>();
if (vm.size() == 3)
{
int r,g,b;
r = vm[0];
g = vm[1];
b = vm[2];
_mean = cv::Scalar(r,g,b);
_has_mean_scalar = true;
}
else if (vm.size() == 1) // bw
{
_mean = cv::Scalar(vm.at(0));
_has_mean_scalar = true;
}
}
}
int feature_size() const
{
if (_bw) return _width*_height;
else return _width*_height*3; // RGB
}
int batch_size() const
{
return _images.size();
}
int test_batch_size() const
{
return _test_images.size();
}
void transform(const APIData &ad)
{
get_data(ad);
if (ad.has("parameters")) // hotplug of parameters, overriding the defaults
{
APIData ad_param = ad.getobj("parameters");
if (ad_param.has("input"))
{
fillup_parameters(ad_param.getobj("input"));
}
}
int catch_read = 0;
std::string catch_msg;
std::vector<std::string> uris;
#pragma omp parallel for
for (size_t i=0;i<_uris.size();i++)
{
bool no_img = false;
std::string u = _uris.at(i);
DataEl<DDImg> dimg;
dimg._ctype._bw = _bw;
dimg._ctype._width = _width;
dimg._ctype._height = _height;
try
{
if (dimg.read_element(u))
{
LOG(ERROR) << "no data for image " << u;
no_img = true;
}
if (!dimg._ctype._db_fname.empty())
_db_fname = dimg._ctype._db_fname;
}
catch(std::exception &e)
{
#pragma omp critical
{
++catch_read;
catch_msg = e.what();
no_img = true;
}
}
if (no_img)
continue;
if (!_db_fname.empty())
continue;
#pragma omp critical
{
_images.insert(_images.end(),
std::make_move_iterator(dimg._ctype._imgs.begin()),
std::make_move_iterator(dimg._ctype._imgs.end()));
_images_size.insert(_images_size.end(),
std::make_move_iterator(dimg._ctype._imgs_size.begin()),
std::make_move_iterator(dimg._ctype._imgs_size.end()));
if (!dimg._ctype._labels.empty())
_test_labels.insert(_test_labels.end(),
std::make_move_iterator(dimg._ctype._labels.begin()),
std::make_move_iterator(dimg._ctype._labels.end()));
if (!dimg._ctype._b64 && dimg._ctype._imgs.size() == 1)
uris.push_back(u);
else if (!dimg._ctype._img_files.empty())
uris.insert(uris.end(),
std::make_move_iterator(dimg._ctype._img_files.begin()),
std::make_move_iterator(dimg._ctype._img_files.end()));
else uris.push_back(std::to_string(i));
}
}
if (catch_read)
throw InputConnectorBadParamException(catch_msg);
_uris = uris;
if (!_db_fname.empty())
return; // db filename is passed to backend
// shuffle before possible split
if (_shuffle)
{
std::mt19937 g;
if (_seed >= 0)
g = std::mt19937(_seed);
else
{
std::random_device rd;
g = std::mt19937(rd());
}
std::shuffle(_images.begin(),_images.end(),g); //XXX beware: labels are not shuffled, i.e. let's not shuffle while testing
}
// split as required
if (_test_split > 0)
{
int split_size = std::floor(_images.size() * (1.0-_test_split));
auto chit = _images.begin();
auto dchit = chit;
int cpos = 0;
while(chit!=_images.end())
{
if (cpos == split_size)
{
if (dchit == _images.begin())
dchit = chit;
_test_images.push_back((*chit));
}
else ++cpos;
++chit;
}
_images.erase(dchit,_images.end());
LOG(INFO) << "data split test size=" << _test_images.size() << " / remaining data size=" << _images.size();
}
if (_images.empty())
throw InputConnectorBadParamException("no image could be found");
}
// data
std::vector<cv::Mat> _images;
std::vector<cv::Mat> _test_images;
std::vector<int> _test_labels;
std::vector<std::pair<int,int>> _images_size;
// image parameters
int _width = 224;
int _height = 224;
bool _bw = false; /**< whether to convert to black & white. */
double _test_split = 0.0; /**< auto-split of the dataset. */
bool _shuffle = false; /**< whether to shuffle the dataset, usually before splitting. */
int _seed = -1; /**< shuffling seed. */
cv::Scalar _mean; /**< mean image pixels, to be subtracted from images. */
bool _has_mean_scalar = false; /**< whether scalar is set. */
std::string _db_fname;
};
}
#include "caffeinputconns.h"
#ifdef USE_TF
#include "tfinputconns.h"
#endif
#endif
|
fannkuchredux.gcc-5.c | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// Contributed by Jeremy Zerfas
// Based on the Ada program by Jonathan Parker and Georg Bauhaus which in turn
// was based on code by Dave Fladebo, Eckehard Berns, Heiner Marxen, Hongwei Xi,
// and The Anh Tran and also the Java program by Oleg Mazurov.
// This value controls how many blocks the workload is broken up into (as long
// as the value is less than or equal to the factorial of the argument to this
// program) in order to allow the blocks to be processed in parallel if
// possible. PREFERRED_NUMBER_OF_BLOCKS_TO_USE should be some number which
// divides evenly into all factorials larger than it. It should also be around
// 2-8 times the amount of threads you want to use in order to create enough
// blocks to more evenly distribute the workload amongst the threads.
#define PREFERRED_NUMBER_OF_BLOCKS_TO_USE 12
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
int main(int argc, char ** argv){
   // Validate the command line before touching argv[1] (previously an
   // out-of-bounds read when no argument was given). The algorithm itself
   // bounds n: 21! overflows 64-bit integers and the unrolled flip loop
   // below assumes n <= 35, so reject anything outside 1..20.
   if(argc<2){
      fprintf(stderr, "usage: %s N\n", argv[0]);
      return 1;
   }
   const intnative_t n=atoi(argv[1]);
   if(n<1 || n>20){
      fprintf(stderr, "N must be between 1 and 20.\n");
      return 1;
   }

   // Create and initialize factorial_Lookup_Table.
   intnative_t factorial_Lookup_Table[n+1];
   factorial_Lookup_Table[0]=1;
   for(intnative_t i=0; ++i<=n;)
      factorial_Lookup_Table[i]=i*factorial_Lookup_Table[i-1];

   // Determine the block_Size to use. If n! is less than
   // PREFERRED_NUMBER_OF_BLOCKS_TO_USE then just use a single block to prevent
   // block_Size from being set to 0. This also causes smaller values of n to
   // be computed serially which is faster and uses less resources for small
   // values of n.
   const intnative_t block_Size=factorial_Lookup_Table[n]/
     (factorial_Lookup_Table[n]<PREFERRED_NUMBER_OF_BLOCKS_TO_USE ?
     1 : PREFERRED_NUMBER_OF_BLOCKS_TO_USE);

   intnative_t maximum_Flip_Count=0, checksum=0;

   // Iterate over each block.
   #pragma omp parallel for \
     reduction(max:maximum_Flip_Count) reduction(+:checksum)
   for(intnative_t initial_Permutation_Index_For_Block=0;
     initial_Permutation_Index_For_Block<factorial_Lookup_Table[n];
     initial_Permutation_Index_For_Block+=block_Size){

      intnative_t count[n];
      int8_t temp_Permutation[n], current_Permutation[n];

      // Initialize count and current_Permutation.
      count[0]=0;
      for(intnative_t i=0; i<n; ++i)
         current_Permutation[i]=i;
      // Decode the block's starting permutation index into a concrete
      // permutation and its count[] state (factorial number system).
      for(intnative_t i=n-1,
        permutation_Index=initial_Permutation_Index_For_Block; i>0; --i){
         const intnative_t d=permutation_Index/factorial_Lookup_Table[i];
         permutation_Index=permutation_Index%factorial_Lookup_Table[i];
         count[i]=d;

         for(intnative_t j=0; j<n; ++j)
            temp_Permutation[j]=current_Permutation[j];
         for(intnative_t j=0; j<=i; ++j)
            current_Permutation[j]= j+d<=i ?
              temp_Permutation[j+d] : temp_Permutation[j+d-i-1];
      }

      // Iterate over each permutation in the block.
      const intnative_t last_Permutation_Index_In_Block=
        initial_Permutation_Index_For_Block+block_Size-1;
      for(intnative_t permutation_Index=initial_Permutation_Index_For_Block; ;
        ++permutation_Index){

         // If the first value in the current_Permutation is not 1 (0) then
         // we will need to do at least one flip for the current_Permutation.
         if(current_Permutation[0]>0){

            // Make a copy of current_Permutation[] to work on. Note that we
            // don't need to copy the first value since that will be stored
            // in a separate variable since it gets used a lot.
            for(intnative_t i=0; ++i<n;)
               temp_Permutation[i]=current_Permutation[i];

            intnative_t flip_Count=1;

            // Flip temp_Permutation until the element at the first_Value
            // index is 1 (0).
            for(intnative_t first_Value=current_Permutation[0];
              temp_Permutation[first_Value]>0; ++flip_Count){

               // Record the new_First_Value and restore the old
               // first_Value at its new flipped position.
               const int8_t new_First_Value=temp_Permutation[first_Value];
               temp_Permutation[first_Value]=first_Value;

               // If first_Value is greater than 3 (2) then we are flipping
               // a series of four or more values so we will also need to
               // flip additional elements in the middle of the
               // temp_Permutation.
               if(first_Value>2){
                  intnative_t low_Index=1, high_Index=first_Value-1;
                  // Note that this loop is written so that it will run at
                  // most 16 times so that compilers will be more willing
                  // to unroll it. Consequently this won't work right when
                  // n is greater than 35. This would probably be the
                  // least of your concerns since 21! won't fit into 64
                  // bit integers and even if it did you probably wouldn't
                  // want to run this program with a value that large
                  // since it would take thousands of years to do on a
                  // modern desktop computer. ;-)
                  do{
                     const int8_t temp=temp_Permutation[high_Index];
                     temp_Permutation[high_Index]=
                       temp_Permutation[low_Index];
                     temp_Permutation[low_Index]=temp;
                  }while(low_Index+++3<=high_Index-- && low_Index<16);
               }

               // Update first_Value to new_First_Value that we recorded
               // earlier.
               first_Value=new_First_Value;
            }

            // Update the checksum.
            if(permutation_Index%2==0)
               checksum+=flip_Count;
            else
               checksum-=flip_Count;

            // Update maximum_Flip_Count if necessary.
            if(flip_Count>maximum_Flip_Count)
               maximum_Flip_Count=flip_Count;
         }

         // Break out of the loop when we get to the
         // last_Permutation_Index_In_Block.
         if(permutation_Index>=last_Permutation_Index_In_Block)
            break;

         // Generate the next permutation.
         int8_t first_Value=current_Permutation[1];
         current_Permutation[1]=current_Permutation[0];
         current_Permutation[0]=first_Value;
         for(intnative_t i=1; ++count[i]>i;){
            count[i++]=0;
            const int8_t new_First_Value=current_Permutation[0]=
              current_Permutation[1];

            for(intnative_t j=0; ++j<i;)
               current_Permutation[j]=current_Permutation[j+1];

            current_Permutation[i]=first_Value;
            first_Value=new_First_Value;
         }
      }
   }

   // Output the results to stdout.
   printf("%jd\nPfannkuchen(%jd) = %jd\n", (intmax_t)checksum, (intmax_t)n,
     (intmax_t)maximum_Flip_Count);

   return 0;
}
|
implicit_task_data.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// This test checks that values stored in task_data in a barrier_begin event
// are still present in the corresponding barrier_end event.
// Therefore, callback implementations different from the ones in callback.h are necessary.
// This is a test for an issue reported in
// https://github.com/OpenMPToolsInterface/LLVM-openmp/issues/39
#define _BSD_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <omp.h>
#include <omp-tools.h>
static const char* ompt_thread_t_values[] = {
NULL,
"ompt_thread_initial",
"ompt_thread_worker",
"ompt_thread_other"
};
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_thread_data_t ompt_get_thread_data;
int main()
{
  // Fork four threads; the master sleeps so the workers reach the implicit
  // barrier at the end of the parallel region first, exercising the
  // barrier begin/end callbacks this test is about.
  // NOTE: the CHECK lines below are FileCheck directives and must not change.
  #pragma omp parallel num_threads(4)
  {
    #pragma omp master
    {
      sleep(1);
    }
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // master thread implicit barrier at parallel end
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}

  // worker thread implicit barrier at parallel end
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]

  return 0;
}
// Thread-begin callback: stamps the new thread's thread_data with a fresh
// unique id and logs the thread type and id. The slot is expected to be
// empty on entry; a non-null value is reported as an anomaly.
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_t_values[thread_type], thread_type, thread_data->value);
}
// Sync-region callback. At scope begin a fresh unique id is stored into
// task_data; the matching end event prints the same slot, letting FileCheck
// verify that the value written at barrier_begin survives until barrier_end
// (regression test for OpenMPToolsInterface/LLVM-openmp issue #39).
// Only implicit-barrier regions are printed.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      task_data->value = ompt_get_unique_id();
      if (kind == ompt_sync_region_barrier_implicit)
        printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
      break;
    case ompt_scope_end:
      // parallel_data may already be NULL at the barrier at parallel end.
      if (kind == ompt_sync_region_barrier_implicit)
        printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// Sync-region-wait callback: prints the task_data stamped by the enclosing
// sync-region begin event, without modifying it. Together with the
// barrier_begin/end prints this verifies the value is stable across the
// whole wait. Only implicit-barrier regions are printed.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      if (kind == ompt_sync_region_barrier_implicit)
        printf("%" PRIu64
               ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, codeptr_ra);
      break;
    case ompt_scope_end:
      // parallel_data may already be NULL at the barrier at parallel end.
      if (kind == ompt_sync_region_barrier_implicit)
        printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
#define register_ompt_callback_t(name, type) \
do{ \
type f_##name = &on_##name; \
if (ompt_set_callback(name, (ompt_callback_t)f_##name) == \
ompt_set_never) \
printf("0: Could not register callback '" #name "'\n"); \
}while(0)
#define register_ompt_callback(name) register_ompt_callback_t(name, name##_t)
// OMPT tool initializer: resolves the runtime entry points it needs and
// registers the three callbacks this test exercises. Returns 1 so the
// runtime keeps the tool active.
int ompt_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                    ompt_data_t *tool_data) {
  ompt_set_callback_t ompt_set_callback;
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  register_ompt_callback(ompt_callback_sync_region);
  // the wait callback reuses the sync_region signature type
  register_ompt_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_ompt_callback(ompt_callback_thread_begin);
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}
// OMPT tool finalizer: only logs the runtime shutdown.
void ompt_finalize(ompt_data_t *tool_data)
{
  printf("0: ompt_event_runtime_shutdown\n");
}
// Entry point the OpenMP runtime probes at startup; returning a non-NULL
// result (with our init/finalize hooks) activates the tool.
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
  return &ompt_start_tool_result;
}
|
GB_nvec_nonempty.c | //------------------------------------------------------------------------------
// GB_nvec_nonempty: count the number of non-empty vectors
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// All pending tuples are ignored. If a vector has all zombies it is still
// counted as non-empty.
#include "GB.h"
GB_PUBLIC                       // accessed by the MATLAB tests in GraphBLAS/Test only
int64_t GB_nvec_nonempty        // return # of non-empty vectors
(
    const GrB_Matrix A,         // input matrix to examine
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (A != NULL) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_PENDING_OK (A)) ;

    //--------------------------------------------------------------------------
    // trivial cases
    //--------------------------------------------------------------------------

    if (GB_IS_FULL (A) || GB_IS_BITMAP (A))
    {
        // A is full or bitmap; nvec_nonempty depends only on the dimensions
        return ((A->vlen == 0) ? 0 : A->vdim) ;
    }

    if (GB_NNZ (A) == 0)
    {
        // A is sparse or hypersparse, with no entries
        return (0) ;
    }

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t anvec = A->nvec ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (anvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // count the non-empty columns
    //--------------------------------------------------------------------------

    // vector k holds the entries Ap [k] .. Ap [k+1]-1, so it is non-empty
    // exactly when Ap [k] < Ap [k+1]. Zombies occupy entries, so a vector
    // of all zombies still counts as non-empty.
    int64_t nvec_nonempty = 0 ;
    const int64_t *restrict Ap = A->p ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(+:nvec_nonempty)
    for (k = 0 ; k < anvec ; k++)
    {
        if (Ap [k] < Ap [k+1]) nvec_nonempty++ ;
    }

    ASSERT (nvec_nonempty >= 0 && nvec_nonempty <= A->vdim) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (nvec_nonempty) ;
}
|
matrix.c |
#include "matrix.h"
/*
* matrix.c
*
* Copyright (c) 2014, Rafat Hussain
* License : BSD 3-Clause
* See COPYRIGHT for more details
*/
/* value/index pair used by sort1d's indirect sort */
typedef struct {
	double* a;   /* pointer to the value being ranked */
	int b;       /* original index of that value */
} vipair;

/* Compute the machine epsilon for double precision: the smallest eps
 * such that 1.0 + eps compares greater than 1.0. */
double macheps() {
	double macheps;
	macheps = 1.0;
	while ((macheps + 1.0) > 1.0) {
		macheps = macheps / 2.0;
	}
	macheps = macheps * 2;
	return macheps;
}

/* qsort comparator over vipair: orders by the pointed-to double in
 * DESCENDING order (largest value first); ties keep qsort's order. */
int compare (const void* ind1, const void* ind2)
{
	if (*((vipair *)ind1)->a > *((vipair *)ind2)->a)
		return -1;
	else if (*((vipair *)ind1)->a < *((vipair *)ind2)->a)
		return 1;
	else
		return 0;
}

/* Fill pos[0..N-1] with the indices of v sorted by descending value;
 * v itself is not modified. No-op when N <= 0 or allocation fails
 * (fix: the malloc result was previously used unchecked). */
void sort1d(double* v,int N, int* pos)
{
	vipair* val = NULL;
	int i;
	if (N <= 0)
		return;
	val = malloc(sizeof(vipair) * N);
	if (val == NULL)
		return;
	for (i = 0; i < N; ++i) {
		val[i].a = &v[i];
		val[i].b = i;
	}
	qsort(val, N, sizeof(vipair), compare);
	for (i = 0; i < N; ++i)
		pos[i] = val[i].b;
	free(val);
}
/* Largest absolute value among array[0..N-1]; returns 0.0 when N <= 0. */
double array_max_abs(double *array,int N) {
	double best = 0.0;
	int idx;
	for (idx = 0; idx < N; ++idx) {
		double mag = fabs(array[idx]);
		if (mag > best) {
			best = mag;
		}
	}
	return best;
}
/* Maximum element of array[0..N-1]; requires N >= 1. */
double array_max(double *array,int N) {
	double best = array[0];
	int idx;
	for (idx = 1; idx < N; ++idx) {
		if (array[idx] > best) {
			best = array[idx];
		}
	}
	return best;
}
/* Minimum element of array[0..N-1]; requires N >= 1. */
double array_min(double *array,int N) {
	double best = array[0];
	int idx;
	for (idx = 1; idx < N; ++idx) {
		if (array[idx] < best) {
			best = array[idx];
		}
	}
	return best;
}
/* Transpose sig (rows x cols, row-major) into col (cols x rows, row-major),
 * visiting the matrix diagonal by diagonal. i indexes the diagonal, from
 * -rows+1 (bottom-left corner) up to cols-1 (top-right corner); ud holds the
 * number of elements on the current diagonal, capped at min(rows, cols). */
void dtranspose(double *sig, int rows, int cols,double *col) {
	int max,ud,i,k;
	if (rows >= cols) {
		max = cols;
	} else {
		max = rows;
	}
	ud = 0;
	for (i= -rows + 1; i < cols; i++) {
		if (i <= 0) {
			/* diagonals starting in the first column: length grows by one */
			ud++;
			if (ud >= max)
				ud = max;
			for (k = 0; k < ud; k++) {
				/* element (k-i, k) of sig -> element (k, k-i) of col */
				col[k*rows+k-i] = sig[(k-i)*cols+k];
			}
		} else {
			/* diagonals starting in the first row: shrink once the
			 * diagonal no longer spans the full height */
			if (i - cols + rows > 0) {
				ud--;
				if (ud >= max)
					ud = max;
			}
			for (k = 0; k < ud; k++) {
				/* element (k, k+i) of sig -> element (k+i, k) of col */
				col[(k+i)*rows+k] = sig[k*cols+k+i];
			}
		}
	}
}
/* Simple transpose: col (cols x rows) = transpose of sig (rows x cols),
 * both row-major. Rows are processed independently, one per thread. */
void stranspose(double *sig, int rows, int cols,double *col) {
	register int r,c;
	#pragma omp parallel for private(r,c)
	for (r = 0; r < rows; r++) {
		for (c = 0; c < cols; c++) {
			col[c * rows + r] = sig[r * cols + c];
		}
	}
}
/* Cache-oblivious recursive transpose of the rows x cols tile of m into n.
 * r and c are the row strides of the FULL destination and source matrices
 * and stay fixed across recursive calls; the larger dimension is halved
 * until the tile is at most BLOCKSIZE in each direction, then transposed
 * directly. */
void rtranspose(double *m, int rows, int cols,double *n, int r, int c) {
	register int i,j;
	int rm,cm;
	int rm1,cm1,rm2,cm2;
	int block;
	block = (int) BLOCKSIZE;
	if (rows <= block && cols <= block) {
		/* base case: direct element-wise transpose of the tile */
		for (i = 0; i < rows; ++i) {
			for (j = 0; j < cols; ++j) {
				n[i+j*r] = m[j+i*c];
				//cout << *(n+i+j*r) << " ";
			}
		}
		//cout << endl;
	} else if (cols >= rows) {
		/* split the wider dimension: left and right halves of m become
		 * top and bottom halves of n */
		rm = rows;
		cm1 = (int) ceil((double) cols/2.0);
		cm2 = cols - cm1;
		rtranspose(m,rm,cm1,n,r,c);
		rtranspose(m+cm1,rm,cm2,n+cm1*r,r,c);
	} else if (rows > cols) {
		/* split the taller dimension: top and bottom halves of m become
		 * left and right halves of n */
		rm1 = (int) ceil((double) rows/2.0);
		rm2 = rows - rm1;
		cm = cols;
		rtranspose(m,rm1,cm,n,r,c);
		rtranspose(m+rm1*c,rm2,cm,n+rm1,r,c);
	}
}
/* Transpose dispatcher: use the recursive blocked transpose when either
 * dimension reaches BLOCKSIZE, otherwise the simple loop version. */
void ctranspose(double *sig, int rows, int cols,double *col) {
	int r,c;
	int block;
	block = (int) BLOCKSIZE;
	r= rows;
	c = cols;
	if (rows >= block || cols >= block) {
		rtranspose(sig,rows,cols,col,r,c);
	} else {
		stranspose(sig,rows,cols,col);
	}
}
/* Top-level transpose entry point: col = transpose of sig (rows x cols).
 * Only matrices that are large in BOTH dimensions (>= 16*BLOCKSIZE) go
 * through the cache-blocked path; everything else uses the simple loop. */
void mtranspose(double *sig, int rows, int cols,double *col) {
	int block;
	block = (int) BLOCKSIZE * 16;
	if (rows >= block && cols >= block) {
		ctranspose(sig,rows,cols,col);
	} else {
		stranspose(sig,rows,cols,col);
	}
}
/* Pretty-print the row x col matrix A to stdout, one "R<i>: ... :R<i>"
 * line per row. */
void mdisplay(double *A, int row, int col) {
	int i,j;
	const double *rowp;
	printf("\n MATRIX Order : %d X %d \n \n",row,col);
	for (i = 0; i < row; i++) {
		rowp = A + i * col;
		printf("R%d: ",i);
		for (j = 0; j < col; j++) {
			printf("%g ",rowp[j]);
		}
		printf(":R%d \n",i);
	}
}
/*
 * C = A + B, element-wise. All three matrices are rows x cols and stored
 * contiguously, so the addition runs over the flat buffer.
 */
void madd(double* A, double* B, double* C,int rows,int cols) {
	int total,idx;
	total = rows * cols;
	#pragma omp parallel for
	for (idx = 0; idx < total; ++idx) {
		C[idx] = A[idx] + B[idx];
	}
}
/*
 * C = A - B, element-wise. All three matrices are rows x cols and stored
 * contiguously, so the subtraction runs over the flat buffer.
 */
void msub(double* A, double* B, double* C,int rows,int cols) {
	int total,idx;
	total = rows * cols;
	#pragma omp parallel for
	for (idx = 0; idx < total; ++idx) {
		C[idx] = A[idx] - B[idx];
	}
}
/*
 * A = alpha * A, in place. A is rows x cols, stored contiguously.
 */
void scale(double *A, int rows, int cols, double alpha) {
	int total,idx;
	total = rows * cols;
	#pragma omp parallel for
	for (idx = 0; idx < total; ++idx) {
		A[idx] *= alpha;
	}
}
/*
 * Naive matrix product C = A * B, where A is ra x ca and B is ca x cb
 * (row-major). C is a ra x cb matrix; output rows are independent and
 * computed in parallel.
 */
void nmult(double* A, double* B, double* C,int ra,int ca, int cb) {
	register int i,j,k;
	#pragma omp parallel for private(i,j,k)
	for (i = 0; i < ra; ++i) {
		for (j = 0; j < cb; ++j) {
			double acc = 0.;
			for (k = 0; k < ca; ++k) {
				acc += A[i * ca + k] * B[k * cb + j];
			}
			C[i * cb + j] = acc;
		}
	}
}
/*
 * C = A * B, where A is ra x ca and B is ca x cb (row-major), computed
 * against a transposed copy of B so the inner product walks both operands
 * with unit stride.
 */
void tmult(double* A, double* B, double* C,int ra,int ca, int cb) {
	register int r,c,k;
	double *Btrans;
	Btrans = (double*) malloc(sizeof(double) * ca * cb);
	mtranspose(B,ca,cb,Btrans);
	#pragma omp parallel for private(r,c,k)
	for (r = 0; r < ra; ++r) {
		for (c = 0; c < cb; ++c) {
			double acc = 0.;
			for (k = 0; k < ca; ++k) {
				acc += A[r * ca + k] * Btrans[c * ca + k];
			}
			C[r * cb + c] = acc;
		}
	}
	free(Btrans);
}
/* Recursive divide-and-conquer product C += A * B for A (m x n),
 * B (n x p), C (m x p): the largest of m, n, p is halved until the
 * subproblem is small enough (m+n+p <= CUTOFF) for the direct triple loop.
 * C must be zero-initialized by the caller (see rmult).
 *
 * NOTE(review): the stride parameters are used shifted from their names —
 * sB is A's row stride, sC is the row stride of BOTH B and C, and sA is
 * never read. This matches how rmult invokes it (sA=m, sB=n, sC=p, with
 * B and C sharing row length p only when n's stride equals B's — here
 * B stride p == C stride p). Confirm before calling recmult directly with
 * sub-matrix strides. */
void recmult(double* A, double* B, double* C,int m,int n, int p,int sA,int sB, int sC) {
	int m2,n2,p2;
	register int i,j,k;
	int u,v,t;
	if (m + n + p <= CUTOFF) {
		//#pragma omp parallel for private(i,j,k,v,u,t)
		for (i = 0; i < m; ++i) {
			for (j = 0; j < p; ++j) {
				v = i * sB;   /* row i of A (stride sB, see note) */
				u = i * sC;   /* row i of C */
				t = j + u;
				for (k = 0; k < n;++k) {
					C[t] += A[k + v] * B[j + k * sC];
				}
			}
		}
	} else if (m >= n && m >= p) {
		/* split the rows of A and C */
		m2 = (int) ceil((double) m / 2.0);
		recmult(A,B,C,m2,n,p,sA,sB,sC);
		recmult(A + m2*sB,B,C + m2*sC,m-m2,n,p,sA,sB,sC);
	} else if (n >= m && n >= p) {
		/* split the inner dimension: columns of A, rows of B */
		n2 = (int) ceil((double) n / 2.0);
		recmult(A,B,C,m,n2,p,sA,sB,sC);
		recmult(A+n2,B+n2*sC,C,m,n-n2,p,sA,sB,sC);
	} else if (p >= m && p >= n) {
		/* split the columns of B and C */
		p2 = (int) ceil((double) p / 2.0);
		recmult(A,B,C,m,n,p2,sA,sB,sC);
		recmult(A,B+p2,C+p2,m,n,p-p2,sA,sB,sC);
	}
}
/* C = A * B via the recursive blocked kernel, where A is m x n and B is
 * n x p (row-major). C (m x p) is zeroed first because recmult
 * accumulates into it. */
void rmult(double* A, double* B, double* C,int m,int n, int p) {
	register int i;
	int total = m * p;
	for (i = 0; i < total; ++i) {
		C[i] = 0.;
	}
	recmult(A,B,C,m,n,p,m,n,p);
}
/* Determine how many halvings are needed before a+b+c drops to CUTOFF or
 * below. On return *a, *b and *c are rounded up to (halved value) * 2^rec
 * so that repeated halving down to the base case stays integral; the
 * recursion depth rec is returned. */
int findrec(int *a, int *b, int *c) {
	int rec;
	double da,db,dc,mul;
	da = (double) *a;
	db = (double) *b;
	dc = (double) *c;
	rec = 0;
	mul = 1.;
	while (da + db + dc > (double) CUTOFF) {
		rec++;
		mul *= 2;
		da = ceil(da/2.);
		db = ceil(db/2.);
		dc = ceil(dc/2.);
	}
	/* (int) binds to da only: the halved value is truncated (it is already
	 * integral after ceil), then scaled back up by mul as a double before
	 * the implicit conversion to int. */
	*a = (int) da * mul;
	*b = (int) db * mul;
	*c = (int) dc * mul;
	return rec;
}
/* Copy X (rows x cols) into the top-left corner of Y, a
 * (rows+zrow) x (cols+zcol) matrix, zero-filling the extra right columns
 * and bottom rows. Both matrices are row-major. */
void add_zero_pad(double *X, int rows, int cols, int zrow, int zcol,double *Y) {
	int newrows,newcols,i,j;
	newrows = rows + zrow;
	newcols = cols + zcol;
	for (i = 0; i < newrows; ++i) {
		for (j = 0; j < newcols; ++j) {
			if (i < rows && j < cols) {
				Y[i * newcols + j] = X[i * cols + j];
			} else {
				Y[i * newcols + j] = 0.;
			}
		}
	}
}
/* Extract the top-left (rows-zrow) x (cols-zcol) corner of Y (rows x cols)
 * into Z, dropping the zrow bottom rows and zcol right columns. */
void remove_zero_pad(double *Y, int rows, int cols, int zrow, int zcol,double *Z) {
	int outrows,outcols,i,j;
	outrows = rows - zrow;
	outcols = cols - zcol;
	for (i = 0; i < outrows; ++i) {
		for (j = 0; j < outcols; ++j) {
			Z[i * outcols + j] = Y[i * cols + j];
		}
	}
}
/* Strided element-wise add over a rows x cols window: C = A + B, where
 * sA, sB and sC are the row strides of A, B and C respectively. */
void madd_stride(double* A, double* B, double* C,int rows,int cols,int sA,int sB,int sC) {
	int i,j;
	for (i = 0; i < rows; ++i) {
		double *crow = C + i * sC;
		double *arow = A + i * sA;
		double *brow = B + i * sB;
		for (j = 0; j < cols; ++j) {
			crow[j] = arow[j] + brow[j];
		}
	}
}
/* Strided element-wise subtract over a rows x cols window: C = A - B,
 * where sA, sB and sC are the row strides of A, B and C respectively. */
void msub_stride(double* A, double* B, double* C,int rows,int cols,int sA,int sB,int sC) {
	int i,j;
	for (i = 0; i < rows; ++i) {
		double *crow = C + i * sC;
		double *arow = A + i * sA;
		double *brow = B + i * sB;
		for (j = 0; j < cols; ++j) {
			crow[j] = arow[j] - brow[j];
		}
	}
}
/* Recursive strided add C = A + B over a rows x cols window, splitting
 * into quadrants until rows+cols+p <= CUTOFF (p only drives the cutoff).
 *
 * NOTE(review): the recursion halves rows/cols/p with integer division, so
 * odd dimensions silently lose their last row/column — presumably callers
 * pass power-of-two sizes; confirm before general use.
 * NOTE(review): the base case indexes A with sA and B with sB, but the
 * recursive offsets advance A by sB and B/C by sC — these only agree when
 * sA == sB == sC. No caller is visible in this file; verify intent. */
void rmadd_stride(double* A, double* B, double* C,int rows,int cols,int p,int sA,int sB,int sC) {
	int i,j,u,v,w;
	if (rows + cols + p <= CUTOFF) {
		for (i = 0; i < rows; ++i) {
			u = i * sC;
			v = i * sA;
			w = i * sB;
			for(j = 0; j < cols;j++) {
				C[j + u] = A[j + v] + B[j + w];
			}
		}
	} else {
		rows/=2;cols/=2;p/=2;
		rmadd_stride(A,B,C,rows,cols,p,sA,sB,sC);
		rmadd_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
		rmadd_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC);
		rmadd_stride(A + rows *sB + cols,B + rows *sC + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
	}
}
/* Recursive strided subtract C = A - B over a rows x cols window, splitting
 * into quadrants until rows+cols+p <= CUTOFF (p only drives the cutoff).
 *
 * NOTE(review): same caveats as rmadd_stride — integer halving drops the
 * last row/column of odd dimensions, and the recursive offsets use sB for
 * A and sC for B/C while the base case uses sA/sB/sC as named; these only
 * agree when all strides are equal. No caller visible here; verify intent. */
void rmsub_stride(double* A, double* B, double* C,int rows,int cols,int p,int sA,int sB,int sC) {
	int i,j,u,v,w;
	if (rows + cols + p <= CUTOFF) {
		for (i = 0; i < rows; ++i) {
			u = i * sC;
			v = i * sA;
			w = i * sB;
			for(j = 0; j < cols;j++) {
				C[j + u] = A[j + v] - B[j + w];
			}
		}
	} else {
		rows/=2;cols/=2;p/=2;
		rmsub_stride(A,B,C,rows,cols,p,sA,sB,sC);
		rmsub_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
		rmsub_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC);
		rmsub_stride(A + rows *sB + cols,B + rows *sC + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
	}
}
/* Strassen recursive multiply: C (m x p) = A (m x n) * B (n x p), with row
 * strides sA, sB, sC. Small problems fall back to the naive triple loop.
 * Quadrant products M1..M7 follow the classic Strassen scheme:
 *   c11 = M1 + M4 - M5 + M7   c12 = M3 + M5
 *   c21 = M2 + M4             c22 = M1 - M2 + M3 + M6
 * M2, M5, M6, M7 are accumulated directly inside the C quadrants; only
 * M1, M3, M4 need temporary buffers.
 * NOTE(review): assumes m, n, p are even at every recursion level
 * (power-of-two padded sizes from smult/findrec) — confirm with driver. */
void srecmult(double* A, double* B, double* C,int m,int n, int p,int sA,int sB, int sC) {
    register int i,j,k;
    int u,v,t;
    double sum;
    double *A1,*B1;
    double *a11,*a12,*a21,*a22;
    double *b11,*b12,*b21,*b22;
    double *c11,*c12,*c21,*c22;
    double *m1,*m2,*m3,*m4,*m5,*m6,*m7;
    int sm1,sm2,sm3,sm4,sm5,sm6,sm7;
    int sA1,sB1;
    if (m + n + p <= CUTOFF) {
        /* base case: naive O(mnp) multiply */
        for (i = 0; i < m; ++i) {
            for (j = 0; j < p; ++j) {
                v = i * sA;
                u = i * sC;
                t = j + u;
                sum = 0.;
                for (k = 0; k < n;++k) {
                    sum += A[k + v] * B[j + k * sB];
                }
                C[t] = sum;
            }
        }
    } else {
        m/=2;n/=2;p/=2;
        // A size mXn, C size mXp
        a11 = A;
        a12 = A + n;
        a21 = A + m * sA;
        a22 = A + n + m * sA;
        //B size nXp
        b11 = B;
        b12 = B + p;
        b21 = B + n * sB;
        b22 = B + p + n * sB;
        //C size mXp
        c11 = C;
        c12 = C + p;
        c21 = C + m * sC;
        c22 = C + p + m * sC;
        // m matrices have dimension m X p each. See http://en.wikipedia.org/wiki/Strassen_algorithm
        m1 = (double*) malloc(sizeof(double) *m * p);
        sm1 = p;
        m3 = (double*) malloc(sizeof(double) *m * p);
        sm3 = p;
        m4 = (double*) malloc(sizeof(double) *m * p);
        sm4 = p;
        /* M2, M5, M6, M7 are computed straight into the C quadrants */
        m2 = c21;
        sm2 = sC;
        m5 = c12;
        sm5 = sC;
        m6 = c22;
        sm6 = sC;
        m7 = c11;
        sm7 = sC;
        //m1 = (a11 + a22) * (b11 + b22)
        sA1 = n;
        sB1 = p;
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        madd_stride(a11,a22,A1,m,n,sA,sA,sA1);
        madd_stride(b11,b22,B1,n,p,sB,sB,sB1);
        srecmult(A1,B1,m1,m,n,p,sA1,sB1,sm1);
        free(A1);
        free(B1);
        //m2 = (a21 + a22) * b11
        A1 = (double*) malloc(sizeof(double) * m * n);
        madd_stride(a21,a22,A1,m,n,sA,sA,sA1);
        srecmult(A1,b11,m2,m,n,p,sA1,sB,sm2);
        free(A1);
        //m3 = a11 * (b12 - b22)
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(b12,b22,B1,n,p,sB,sB,sB1);
        srecmult(a11,B1,m3,m,n,p,sA,sB1,sm3);
        free(B1);
        //m4 = a22 * (b21 - b11)
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(b21,b11,B1,n,p,sB,sB,sB1);
        srecmult(a22,B1,m4,m,n,p,sA,sB1,sm4);
        free(B1);
        //m5 = (a11 + a12) * b22
        A1 = (double*) malloc(sizeof(double) * m * n);
        madd_stride(a11,a12,A1,m,n,sA,sA,sA1);
        srecmult(A1,b22,m5,m,n,p,sA1,sB,sm5);
        free(A1);
        //m6 = (a21 - a11) * (b11 + b12)
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(a21,a11,A1,m,n,sA,sA,sA1);
        madd_stride(b11,b12,B1,n,p,sB,sB,sB1);
        srecmult(A1,B1,m6,m,n,p,sA1,sB1,sm6);
        free(A1);
        free(B1);
        //m7 = (a12 - a22) * (b21 + b22)
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(a12,a22,A1,m,n,sA,sA,sA1);
        madd_stride(b21,b22,B1,n,p,sB,sB,sB1);
        srecmult(A1,B1,m7,m,n,p,sA1,sB1,sm7);
        free(A1);
        free(B1);
        // c11 = m1 + m4 - m5 + m7  (accumulated in place: m7 aliases c11)
        A1 = (double*) malloc(sizeof(double) * m * p);
        sA1 = p;
        madd_stride(m1,m7,m7,m,p,sm1,sm7,sm7);
        msub_stride(m4,m5,A1,m,p,sm4,sm5,sA1);
        madd_stride(m7,A1,m7,m,p,sm7,sA1,sm7);
        free(A1);
        // c22 = m1 - m2 + m3 + m6  (m6 aliases c22)
        A1 = (double*) malloc(sizeof(double) * m * p);
        sA1 = p;
        madd_stride(m1,m6,m6,m,p,sm1,sm6,sm6);
        msub_stride(m3,m2,A1,m,p,sm3,sm2,sA1);
        madd_stride(m6,A1,m6,m,p,sm6,sA1,sm6);
        free(A1);
        //c12 = m3 + m5  (m5 aliases c12)
        madd_stride(m3,m5,m5,m,p,sm3,sm5,sm5);
        //c21 = m4 + m2  (m2 aliases c21)
        madd_stride(m4,m2,m2,m,p,sm4,sm2,sm2);
        free(m1);
        free(m3);
        free(m4);
    }
}
/* Strassen multiply driver: C (m x p) = A (m x n) * B (n x p).
 * findrec rounds the dimensions up to recursion-friendly sizes (a, b, c);
 * the operands are zero-padded to those sizes, multiplied recursively,
 * and the padding stripped from the result.
 * FIX: removed the buffer P ((a/2)*(c/2) doubles) that was allocated and
 * freed but never used, and the unused findrec return value. */
void smult(double* A, double* B, double* C,int m,int n, int p) {
    int a,b,c;
    double *X,*Y,*Z;
    a = m;
    b = n;
    c = p;
    findrec(&a,&b,&c);
    X = (double*) malloc(sizeof(double) * a * b);
    Y = (double*) malloc(sizeof(double) * b * c);
    Z = (double*) malloc(sizeof(double) * a * c);
    /* pad A and B with zero rows/columns up to (a x b) and (b x c) */
    add_zero_pad(A,m,n,a-m,b-n,X);
    add_zero_pad(B,n,p,b-n,c-p,Y);
    srecmult(X,Y,Z,a,b,c,b,c,c);
    /* copy the top-left m x p block of Z into C */
    remove_zero_pad(Z,a,c,a-m,c-p,C);
    free(X);
    free(Y);
    free(Z);
}
/* Matrix multiply dispatcher: C = A * B.
 * Small problems (m+n+p within half the cutoff) go to the plain triple
 * loop; larger ones use the Strassen path. */
void mmult(double* A, double* B, double* C,int m,int n, int p) {
    if (m + n + p > CUTOFF/2) {
        smult(A,B,C,m,n,p);
    } else {
        nmult(A,B,C,m,n,p);
    }
}
/* In-place LU decomposition with partial (row) pivoting.
 * On return A holds the multipliers of L below the diagonal and U on and
 * above it; ipiv[k] records which original row ended up in row k.
 * Always returns 0; a zero pivot column is skipped silently (singularity
 * is detected later, e.g. by linsolve's diagonal check). */
static int pludecomp(double *A,int N,int *ipiv) {
    int k,j,l,c1,c2,mind,tempi;
    double ld,mult,mval,temp;
    /* start from the identity permutation */
    for(k=0;k < N;++k)
        ipiv[k] = k;
    for(k = 0; k < N-1; ++k) {
        /* find the row with the largest |entry| in column k */
        mval = fabs(A[k*N + k]);
        mind = k;
        for (j=k+1; j < N;++j) {
            if (mval < fabs(A[j*N + k])) {
                /* BUG FIX: keep the magnitude, not the signed value; a
                 * negative mval made every later nonzero row look larger
                 * and could select a non-maximal pivot. */
                mval = fabs(A[j*N + k]);
                mind = j;
            }
        }
        if ( mind != k) {
            /* swap rows k and mind, recording the swap in ipiv */
            c1 = k *N;
            c2 = mind * N;
            tempi = ipiv[mind];
            ipiv[mind] = ipiv[k];
            ipiv[k] = tempi;
            for (j = 0; j < N;j++) {
                temp = A[c1 + j];
                *(A + c1 + j) = *(A + c2 + j);
                *(A + c2 + j) = temp;
            }
        }
        c2 = k*N;
        ld = A[c2 + k];
        if (ld != 0.) {
            /* eliminate below the pivot; multipliers overwrite the
             * eliminated entries and form the L factor */
            for (j = k+1; j < N; ++j) {
                c1 = j*N;
                mult = A[c1+k] /= ld;
                for(l = k+1; l < N; ++l) {
                    A[c1+l] -= mult * A[c2 + l];
                }
            }
        }
    }
    return 0;
}
/* Public wrapper for the static pivoted LU routine. The status code from
 * pludecomp (currently always 0) is intentionally discarded. */
void ludecomp(double *A,int N,int *ipiv) {
    pludecomp(A,N,ipiv);
}
/* Solve A x = b where A holds the packed LU factors from pludecomp and
 * ipiv the row permutation.
 *   1. forward substitution:  L y = P b
 *   2. back substitution:     U x = y
 * Prints a message and exits if a diagonal entry of U is zero. */
void linsolve(double *A,int N,double *b,int *ipiv,double *x) {
    int i,j,row;
    double *y;
    double acc;
    y = (double*) malloc(sizeof(double) *N);
    for(i = 0; i < N;++i) {
        y[i] = 0.;
        x[i] = 0.;
        /* singular U => no unique solution */
        if ( A[i*N + i] == 0.) {
            printf("The Matrix system does not have a unique solution");
            exit(1);
        }
    }
    /* forward substitution against the permuted right-hand side */
    y[0] = b[ipiv[0]];
    for(i = 1; i < N; ++i) {
        row = i*N;
        acc = 0.;
        for(j = 0; j < i; ++j) {
            acc += A[row + j] * y[j];
        }
        y[i] = b[ipiv[i]] - acc;
    }
    /* back substitution using direct row indexing */
    x[N - 1] = y[N - 1] / A[N * N - 1];
    for (i = N - 2; i >= 0; i--) {
        row = i*N;
        acc = 0.;
        for(j = i+1; j < N;j++) {
            acc += A[row + j] * x[j];
        }
        x[i] = (y[i] - acc) / A[row + i];
    }
    free(y);
}
/* Compute inv = A^{-1} (N x N, row-major) column by column, solving
 * A * x = e_c with the LU factors stored in A and pivots in ipiv. */
void minverse(double *A,int N,int *ipiv,double *inv) {
    int c,r;
    double *rhs,*sol;
    rhs = (double*) malloc(sizeof(double) * N);
    sol = (double*) malloc(sizeof(double) * N);
    for (c = 0; c < N; ++c) {
        rhs[c] = 0.;
        sol[c] = 0.;
    }
    for (c = 0; c < N; ++c) {
        /* rhs is the c-th unit vector */
        rhs[c] = 1.;
        linsolve(A,N,rhs,ipiv,sol);
        /* scatter the solution into column c of inv */
        for (r = 0; r < N; ++r) {
            inv[r * N + c] = sol[r];
        }
        rhs[c] = 0.;
    }
    free(sol);
    free(rhs);
}
/* Fill mat (row-major) with the N x N identity matrix. */
void eye(double *mat,int N) {
    int r,c;
    for (r = 0; r < N; ++r) {
        double *row = mat + r * N;
        for (c = 0; c < N; ++c) {
            row[c] = (r == c) ? 1. : 0.;
        }
    }
}
/* Householder vector (Golub & Van Loan, Alg. 5.1.1 style).
 * Given x (length N), fills v (v[0] normalised to 1 via the final scale)
 * and returns beta such that (I - beta v v^T) x zeroes x[1..N-1].
 * sigma = x[1:]^T x[1:] is computed with mmult as a 1x(N-1) * (N-1)x1
 * product. FIX: scratch accumulator is a stack array instead of an
 * unchecked one-element malloc/free pair. */
static double house_1(double*x,int N,double *v) {
    double beta,mu,temp;
    double sigma[1];
    int i;
    if (N > 1) {
        mmult(x+1,x+1,sigma,1,N-1,1);
    } else {
        sigma[0] = 0.0;
    }
    v[0] =1.;
    for (i = 1; i < N;++i) {
        v[i] = x[i];
    }
    if (sigma[0] == 0. && x[0] >= 0.) {
        /* x already a non-negative multiple of e1: nothing to reflect */
        beta = 0.;
    } else if (sigma[0] == 0. && x[0] < 0.) {
        beta = -2.;
    }else {
        mu = sqrt(sigma[0] + x[0] * x[0]);  /* mu = ||x|| */
        if (x[0] <= 0.) {
            v[0] = x[0] - mu;
        } else {
            /* numerically stable form avoiding cancellation */
            v[0] = - sigma[0] / (x[0] + mu);
        }
        temp = v[0];
        beta = (2.0 * v[0] * v[0]) /(sigma[0] + v[0] * v[0]);
        /* rescale so that v[0] == 1 */
        for (i = 0; i < N;++i) {
            v[i] /= temp;
        }
    }
    return beta;
}
/* Alternative Householder computation: v = normalise(x + sign(x[0])*||x||*e1),
 * beta fixed at 2 (v has unit length). sign(0) is treated as 0.
 * FIX: removed two unchecked heap allocations (a one-double buffer and a
 * basis vector of which only the first entry was ever nonzero); behavior
 * is unchanged. */
double house_2(double*x,int N,double *v) {
    double sgn,sc;
    double nrm[1];
    int i;
    if (x[0] > 0.) {
        sgn = 1.0;
    } else if (x[0] < 0.) {
        sgn = -1.0;
    } else {
        sgn = 0.;
    }
    /* ||x||_2 via a 1xN * Nx1 product */
    mmult(x,x,nrm,1,N,1);
    sc = sqrt(nrm[0]) * sgn;
    /* only the first component of x is shifted by sign(x[0])*||x|| */
    v[0] = sc + x[0];
    for (i = 1; i < N;++i) {
        v[i] = x[i];
    }
    /* normalise v to unit length (NaN if v is the zero vector, as before) */
    mmult(v,v,nrm,1,N,1);
    sc = sqrt(nrm[0]);
    for(i = 0; i < N;++i) {
        v[i] = v[i] / sc;
    }
    return 2.0;
}
/* Householder vector/beta for x (length N). Forwards to the house_1
 * formulation; house_2 is an alternative kept for reference. */
double house(double*x,int N,double *v) {
    return house_1(x,N,v);
}
/* Build the dense N x N Householder reflector  mat = I - beta * v v^T. */
void housemat(double *v, int N,double beta,double *mat) {
    double *outer;
    outer = (double*) malloc(sizeof(double) * N * N);
    /* mat = I first (kept before the product, as in the original,
     * in case mat aliases v) */
    eye(mat,N);
    /* outer = beta * (v v^T) */
    mmult(v,v,outer,N,1,N);
    scale(outer,N,N,beta);
    msub(mat,outer,mat,N,N);
    free(outer);
}
/* Householder QR factorisation, in place. A is M x N (requires M >= N).
 * On return the upper triangle of A holds R; the entries below the
 * diagonal hold the essential parts of the Householder vectors, and
 * bvec[j] the scalar beta of the j-th reflector. Expand with getQR. */
void qrdecomp(double *A, int M, int N,double *bvec) {
    int j,i,k,u,t;
    double *x,*v,*AT,*w;
    double beta;
    if (M < N) {
        printf("M should be greater than or equal to N");
        exit(1);
    }
    x = (double*) malloc(sizeof(double) * M);
    v = (double*) malloc(sizeof(double) * M);
    AT = (double*) malloc(sizeof(double) * M * N);
    w = (double*) malloc(sizeof(double) * M * M);
    for(j = 0; j < N;++j) {
        /* x = A[j:M, j], the column segment to annihilate */
        for(i=j;i < M;++i) {
            x[i-j] = A[i*N+j];
        }
        beta = house(x,M-j,v);
        bvec[j] = beta;
        /* gather the trailing block A[j:M, j:N] transposed so that mmult
         * can form A^T v as a matrix-vector product */
        for (i=j; i < M; i++) {
            t = i * N;
            u = 0;
            for (k=j; k < N; k++) {
                AT[u+i-j] = A[k+t];
                u+=(M-j);
            }
        }
        /* apply the reflector: A <- A - v * (beta * (A^T v))^T */
        mmult(AT,v,w,N-j,M-j,1);
        scale(w,N-j,1,beta);
        mmult(v,w,AT,M-j,1,N-j);
        for (i=j; i < M; i++) {
            t = i *N;
            for (k=j; k < N; k++) {
                A[t+k] -= AT[(i-j)*(N-j) + k - j];
            }
        }
        /* store the essential part of v below the diagonal */
        if (j < M) {
            for(i=j+1;i < M;++i) {
                A[i*N+j] = v[i-j];
            }
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
/* Expand the packed factors produced by qrdecomp into explicit
 * Q (M x N, orthonormal columns) and R (N x N, upper triangular).
 * A holds R in its upper triangle and the Householder vectors below the
 * diagonal; bvec the reflector scalars. Q is accumulated by applying the
 * reflectors in reverse order to the identity.
 * FIX: removed the scratch vector x that was allocated and freed but
 * never used. */
void getQR(double *A,int M,int N,double *bvec,double *Q, double *R) {
    int i,j,k,t,u;
    double *v,*AT,*w;
    v = (double*) malloc(sizeof(double) * M);
    AT = (double*) malloc(sizeof(double) * M * N);
    w = (double*) malloc(sizeof(double) * M * M);
    /* R = upper triangle of A */
    for(i = 0; i < N;++i) {
        t = i *N;
        for(j = 0; j < N;++j) {
            if (i > j) {
                R[t+j] = 0.;
            } else {
                R[t+j] = A[t+j];
            }
        }
    }
    /* Q starts as the leading M x N block of the identity */
    for(i = 0; i < M;++i) {
        t = i *N;
        for(j = 0; j < N;++j) {
            if (i == j) {
                Q[t+j] = 1.;
            } else {
                Q[t+j] = 0.;
            }
        }
    }
    for(j = N-1; j >= 0;--j) {
        /* reconstruct the j-th Householder vector (implicit leading 1) */
        v[0] = 1.;
        for(i=j+1;i < M;++i) {
            v[i-j] = A[i*N+j];
        }
        /* gather Q[j:M, j:N] transposed for the Q^T v product */
        for (i=j; i < M; i++) {
            t = i * N;
            u = 0;
            for (k=j; k < N; k++) {
                AT[u+i-j] = Q[k+t];
                u+=(M-j);
            }
        }
        /* Q <- (I - beta v v^T) Q on the trailing block */
        mmult(AT,v,w,N-j,M-j,1);
        scale(w,N-j,1,bvec[j]);
        mmult(v,w,AT,M-j,1,N-j);
        for (i=j; i < M; i++) {
            t = i *N;
            for (k=j; k < N; k++) {
                Q[t+k] -= AT[(i-j)*(N-j) + k - j];
            }
        }
    }
    free(v);
    free(AT);
    free(w);
}
/* In-place reduction of A (N x N) to upper Hessenberg form by a sequence
 * of Householder similarity transforms: A <- H_k A H_k for k = 0..N-3.
 * Each reflector zeroes column k below the first subdiagonal. */
void hessenberg(double *A,int N) {
    int k,i,j,t,u;
    double *x,*v,*AT,*w;
    double beta;
    x = (double*) malloc(sizeof(double) * N);
    v = (double*) malloc(sizeof(double) * N);
    AT = (double*) malloc(sizeof(double) * N * N);
    w = (double*) malloc(sizeof(double) * N);
    for (k = 0; k < N-2;++k) {
        /* x = A[k+1:N, k], the subdiagonal column segment to annihilate */
        for(i=k + 1;i < N;++i) {
            x[i-k-1] = A[i*N+k];
        }
        beta = house(x,N-k-1,v);
        /* gather A[k+1:N, k:N] transposed for the A^T v product */
        for (i=k+1; i < N; i++) {
            t = i * N;
            u = 0;
            for (j=k; j < N; j++) {
                AT[u+i-k-1] = A[j+t];
                u+=(N-k-1);
            }
        }
        /* left update: A[k+1:N, k:N] <- (I - beta v v^T) A[k+1:N, k:N] */
        mmult(AT,v,w,N-k,N-k-1,1);
        scale(w,N-k,1,beta);
        mmult(v,w,AT,N-k-1,1,N-k);
        for (i=k+1; i < N; i++) {
            t = i * N;
            for (j=k; j < N; j++) {
                A[t+j] -= AT[(i-k-1)*(N-k) + j - k];
            }
        }
        /* gather A[0:N, k+1:N] for the right-side update */
        for (i=0; i < N; i++) {
            t = i * N;
            u = i * (N-k-1);
            for (j=k+1; j < N; j++) {
                AT[u+j-k-1] = A[t+j];
            }
        }
        /* right update: A[0:N, k+1:N] <- A[0:N, k+1:N] (I - beta v v^T) */
        mmult(AT,v,w,N,N-k-1,1);
        scale(w,N,1,beta);
        mmult(w,v,AT,N,1,N-k-1);
        for (i=0; i < N; i++) {
            t = i * N;
            u = i * (N-k-1);
            for (j=k+1; j < N; j++) {
                A[t+j] -= AT[u+j-k-1];
            }
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
/* One Francis double-shift QR sweep on an unreduced upper Hessenberg
 * matrix A (N x N), in place.
 * Reference - Algorithm 7.5.1 Golub,van Loan Matrix Computations 3rd Edition
 * FIX: the N <= 2 early return now happens BEFORE the allocations and
 * before the shift setup. Previously it returned after four mallocs
 * (leaking all of them) and after reading A[N+N+1], which is out of
 * bounds for a 2x2 matrix. Also removed the unused local m. */
void francisQR(double *A,int N) {
    int n,k,q,r,t,u,i,j;
    double s,t2,beta;
    double *x,*v,*AT,*w;
    int NN;
    if (N <= 2) {
        return;  /* nothing to do for 1x1 / 2x2 blocks */
    }
    x = (double*) malloc(sizeof(double) * 3);
    v = (double*) malloc(sizeof(double) * 3);
    AT = (double*) malloc(sizeof(double) * 3 * N);
    w = (double*) malloc(sizeof(double) * N);
    n = N-1;
    NN = N*N;
    /* implicit double shift from the trailing 2x2 block:
     * s = trace, t2 = determinant */
    s = A[NN-1] + A[NN-N-2];
    t2 = A[NN-1] * A[NN-N-2] - A[NN-2] * A[NN-N-1];
    /* first column of (A - s1 I)(A - s2 I) — only 3 nonzero entries */
    x[0] = A[0]*A[0] + A[1]*A[N] - s*A[0] + t2;
    x[1] = A[N]*(A[0] + A[N+1] - s);
    x[2] = A[N] * A[N+N+1];
    /* chase the bulge down the subdiagonal with 3x3 reflectors */
    for (k = -1; k < N - 3;++k) {
        beta = house(x,3,v);
        if (k > 0) {
            q = k;
        } else {
            q = 0;
        }
        /* gather rows k+1..k+3, columns q..N-1, transposed */
        for (i=k+1; i < k+4; i++) {
            t = i * N;
            u = 0;
            for (j=q; j < N; j++) {
                AT[u+i-k-1] = A[j+t];
                u+=3;
            }
        }
        /* left update on the 3-row band */
        mmult(AT,v,w,N-q,3,1);
        scale(w,N-q,1,beta);
        mmult(v,w,AT,3,1,N-q);
        for (i=k+1; i < k+4; i++) {
            t = i * N;
            for (j=q; j < N; j++) {
                A[t+j] -= AT[(i-k-1)*(N-q) + j - q];
            }
        }
        /* r = last row affected by the right update */
        if (k+4 >= n) {
            r = N;
        } else {
            r = k+4+1;
        }
        for (i=0; i < r; i++) {
            t = i * N;
            u = i * 3;
            for (j=k+1; j < k+4; j++) {
                AT[u+j-k-1] = A[t+j];
            }
        }
        /* right update on the 3-column band */
        mmult(AT,v,w,r,3,1);
        scale(w,r,1,beta);
        mmult(w,v,AT,r,1,3);
        for (i=0; i < r; i++) {
            t = i * N;
            u = i * 3;
            for (j=k+1; j < k+4; j++) {
                A[t+j] -= AT[u+j-k-1];
            }
        }
        /* next bulge column */
        x[0] = A[N*(k+2) + k+1];
        x[1] = A[N*(k+3) + k+1];
        if (k < n-3) {
            x[2] = A[N*(k+4) + k+1];
        }
    }
    /* final 2x2 reflector to clean up the bottom corner */
    beta = house(x,2,v);
    for (i=n-1; i < N; i++) {
        t = i * N;
        u = 0;
        for (j=n-2; j < N; j++) {
            AT[u+i-n+1] = A[j+t];
            u+=2;
        }
    }
    mmult(AT,v,w,3,2,1);
    scale(w,3,1,beta);
    mmult(v,w,AT,2,1,3);
    for (i=n-1; i < N; i++) {
        t = i * N;
        for (j=n-2; j < N; j++) {
            A[t+j] -= AT[(i-n+1)*3 + j - n + 2];
        }
    }
    for (i=0; i < N; i++) {
        t = i * N;
        u = i * 2;
        for (j=n-1; j < N; j++) {
            AT[u+j-n+1] = A[t+j];
        }
    }
    mmult(AT,v,w,N,2,1);
    scale(w,N,1,beta);
    mmult(w,v,AT,N,1,2);
    for (i=0; i < N; i++) {
        t = i * N;
        u = i * 2;
        for (j=n-1; j < N; j++) {
            A[t+j] -= AT[u+j-n+1];
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
void eig22(double *A, int stride,double *eigre,double *eigim) {
int N;
double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,t2,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
if ( (a12 + a21) == 0) {
c = 1./sqrt(2.0);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1. + sqrt(1+t1*t1));
c = 1./sqrt(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
eigre[0] = eigre[1] = at11;
eigim[0] = sqrt(-at12 * at21);
eigim[1] = -sqrt(-at12 * at21);
if ( at12*at21 >= 0) {
if (at12 == 0) {
c = 0;
s = 1;
c2 = 0;
s2 = 1;
cs = 0;
} else {
t = sqrt(at21/at12);
t2 = t * t;
cs = t/(1+t2);
c2 = (1+t2);
s2 = t2 /(1+t2);
}
eigim[0] = eigim[1] = 0.0;
eigre[0] = at11 - cs * (at12 + at21);
eigre[1] = at11 + cs * (at12 + at21);
}
}
/* Shifted QR iteration driver (Francis algorithm): reduces a copy of A to
 * (quasi-)triangular real Schur form in H. Returns 1 on convergence within
 * 30*N sweeps, 0 otherwise. 1x1 and 2x2 diagonal blocks of H then carry
 * the eigenvalues (see eig / eig2t). */
int francis_iter(double *A, int N, double *H) {
    int success,brkpoint;
    int i,j,it,p,q,t,u;
    double *temp;
    success = 0;
    brkpoint = 30 * N;   /* iteration budget */
    it = 0;
    p = N - 1;
    temp = (double*) malloc(sizeof(double) * N * N);
    for(i = 0; i < N*N;++i) {
        H[i] = A[i];
    }
    hessenberg(H,N);
    while (p > 1 && it < brkpoint) {
        /* deflate converged 1x1 / 2x2 blocks off the bottom */
        while (p > 1 && (H[N*p + p-1] == 0 || H[N*(p-1) + p-2] == 0)) {
            if (H[N*p + p-1] == 0) {
                p--;
            } else if (H[N*(p-1) + p-2] == 0) {
                p=p-2;
            }
        }
        if (p > 0) {
            /* find the top q of the active unreduced block [q..p] */
            q = p-1;
            while (q > 0 && fabs(H[N*q + q-1]) != 0) {
                q--;
            }
            /* copy the active block, run one Francis sweep, copy back */
            for (i=q; i <= p; i++) {
                t = i * N;
                u = (i-q) * (p-q+1);
                for (j=q; j <= p; j++) {
                    temp[u+j-q] = H[t+j];
                }
            }
            francisQR(temp,p-q+1);
            for (i=q; i <= p; i++) {
                t = i * N;
                u = (i-q) * (p-q+1);
                for (j=q; j <= p; j++) {
                    H[t+j] = temp[u+j-q];
                }
            }
            /* flush negligible subdiagonal entries to exact zero */
            for(i = q; i <= p-1;++i) {
                if ( fabs(H[(i+1)*N+i]) <= TOL * (fabs(H[i*N+i]) + fabs(H[(i+1)*N+i+1]) ) ) {
                    H[(i+1)*N+i] = 0.;
                }
            }
            it++;
        }
    }
    if (it == brkpoint) {
        success = 0;
    } else {
        success = 1;
    }
    free(temp);
    return success;
}
static void eig2t(double *A, int stride) {
int N;
double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
if ( (a12 + a21) == 0) {
c = 1./sqrt(2.0);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1. + sqrt(1+t1*t1));
c = 1./sqrt(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
A[0] = at11;
A[1] = at12;
A[N] = at21;
A[N+1] = at22;
}
/* Eigenvalues of a real N x N matrix A: run the Francis QR iteration to
 * obtain the quasi-triangular Schur form H, rotate each 2x2 diagonal
 * block into standard form (eig2t), then read off eigenvalues —
 * real ones from 1x1 blocks, real or complex pairs from 2x2 blocks.
 * Results go to eigre[0..N-1] / eigim[0..N-1]. */
void eig(double *A,int N,double *eigre,double *eigim) {
    int i,t,u,n;
    double *H;
    double t1,t2,cs;
    H = (double*) malloc(sizeof(double) * N * N);
    n = N - 1;
    francis_iter(A,N,H);
    /* pass 1: standardise every 2x2 block (nonzero subdiagonal entry) */
    i = 0;
    while (i < n) {
        u = i * N;
        t = (i+1)*N;
        if (H[t+i] != 0.) {
            eig2t(H+u+i,N);
            i = i +2;
        } else {
            i++;
        }
    }
    /* pass 2: extract eigenvalues block by block */
    i = 0;
    while (i < n) {
        u = i * N;
        t = (i+1)*N;
        if (H[t+i] != 0.) {
            if (H[u+i+1] * H[t+i] < 0.) {
                /* off-diagonal product negative: complex pair */
                eigre[i] = H[u+i];
                eigre[i+1] = H[t+i+1];
                eigim[i] = sqrt(-H[u+i+1] * H[t+i]);
                eigim[i+1] = -sqrt(-H[u+i+1] * H[t+i]);
            } else {
                /* real pair from the standardised 2x2 block */
                if (H[u+i+1] == 0.) {
                    cs = 0.;
                } else {
                    t1 = sqrt(H[t+i]/H[u+i+1]);
                    t2 = t1 * t1;
                    cs = t1/(1+t2);
                }
                eigre[i] = H[u+i] - cs * (H[u+i+1] + H[t+i]);
                eigre[i+1] = H[u+i] + cs * (H[u+i+1] + H[t+i]);
                eigim[i] = 0.;
                eigim[i+1] = 0.;
            }
            i= i + 2;
        } else {
            /* 1x1 block: a real eigenvalue */
            eigre[i] = H[u+i];
            eigim[i] = 0.;
            i++;
        }
    }
    /* trailing 1x1 block if the last pair step stopped one short */
    if (i == n) {
        eigre[i] = H[N*N - 1];
        eigim[i] = 0.;
    }
    free(H);
}
/* Recursive Cholesky A = U^T U on the leading N x N block of a matrix
 * stored with row stride `stride`. The upper factor overwrites A's upper
 * triangle; U22 is caller-provided scratch (at least (N-1)^2 doubles).
 * Returns 0 on success, -1 if A is not positive definite. */
static int rcholu(double *A,int N, int stride, double *U22) {
    int sc;
    int j,i,u,w;
    double u11;
    if (N == 1) {
        if (A[0] > 0) {
            A[0] = sqrt(A[0]);
            return 0;
        } else {
            return -1;
        }
    } else {
        /* BUG FIX: also reject a zero pivot (was `< 0`), which would
         * otherwise divide by zero below; matches the N == 1 branch. */
        if (A[0] <= 0) {
            return -1;
        }
        u11 = sqrt(A[0]);
        A[0] = u11;
        /* first row of U: A[0, 1:] / u11 */
        for (j = 1; j < N;++j) {
            A[j] /= u11;
        }
        /* trailing update: A22 <- A22 - u12^T u12 (upper triangle only) */
        mmult(A+1,A+1,U22,N-1,1,N-1);
        for (i = 0; i < N-1; ++i) {
            u = stride + 1+ i * stride;
            w = i * (N-1);
            for(j = i; j < N-1;j++) {
                A[j + u] -= U22[j + w];
            }
        }
        sc = rcholu(A+stride+1,N-1,stride,U22);
        if (sc == -1) {
            return -1;
        }
    }
    return sc;
}
/* Blocked recursive Cholesky on the leading N x N block (row stride
 * `stride`). Factors a BLOCKSIZE x BLOCKSIZE corner with rcholu, solves
 * the panel U11^T U12 = A12 by forward substitution, downdates the
 * trailing matrix with U12^T U12, and recurses. UB and UT are
 * caller-provided scratch buffers. Returns 0 on success, -1 if A is not
 * positive definite. */
static int rbcholu(double *A,int N, int stride, double *UB, double *UT) {
    int bs,bb,i,j,Nb,t,k,u,v,w,sc;
    double *b,*x,*U12,*U12T;
    double sum;
    bs = (int) BLOCKSIZE;
    bb = bs*bs;
    if (N <= BLOCKSIZE) {
        /* small enough: plain recursive Cholesky */
        sc = rcholu(A,N,stride,UB);
        if (sc == -1) {
            return -1;
        }
    } else {
        Nb = N - bs;
        x = (double*) malloc(sizeof(double) * bs);
        b = (double*) malloc(sizeof(double) * bs);
        U12T = (double*) malloc(sizeof(double) * Nb * bs);
        U12 = (double*) malloc(sizeof(double) * Nb * bs);
        rcholu(A,bs,stride,UB); // U11
        /* UT = transpose of the first bs rows of A (U11 | A12) */
        for (i =0; i < bs;++i) {
            t = i *stride;
            u = 0;
            for(j = 0; j < N;++j) {
                UT[u+i] = A[j+t];
                u += bs;
            }
        }
        /* panel solve: for each trailing column k, forward-substitute
         * U11^T x = A12[:,k]; result goes into A and U12T */
        for(k = 0; k < Nb;++k) {
            u = k * bs;
            for(i = 0; i < bs;++i) {
                b[i] = UT[bb+u+i];
                x[i] = 0.;
            }
            for (i = 0; i < bs;++i) {
                t = i*bs;
                sum = 0;
                for (j = 0; j < i;++j) {
                    sum += UT[t+j] * x[j];
                }
                x[i] = (b[i] - sum) / UT[t+i];
            }
            v = bs + k;
            for(i = 0; i < bs;++i) {
                A[v] = x[i];
                U12T[u+i] = x[i];
                v += stride;
            }
        }
        /* downdate: trailing block -= U12^T U12 (upper triangle only) */
        mtranspose(U12T,Nb,bs,U12);
        mmult(U12T,U12,UT,Nb,bs,Nb);
        free(U12T);
        free(U12);
        free(b);
        free(x);
        for (i = 0; i < Nb; ++i) {
            u = bs * stride + bs + i * stride;
            w = i * Nb;
            for(j = i; j < Nb;j++) {
                A[j + u] -= UT[j + w];
            }
        }
        sc = rbcholu(A + bs * stride + bs,Nb,stride,UB,UT);
        if (sc == -1) {
            return -1;
        }
    }
    return sc;
}
/* Cholesky factorisation A = U^T U of an N x N matrix, in place.
 * Leaves the upper factor in A, zeroes the strictly lower triangle,
 * and returns 0 on success or -1 if A is not positive definite. */
int cholu(double *A, int N) {
    int status,row,col;
    double *scratch;
    scratch = (double*) malloc(sizeof(double) * N * N);
    status = rcholu(A,N,N,scratch);
    free(scratch);
    /* clear the lower triangle left over from the input */
    for (row = 0; row < N; ++row) {
        for (col = 0; col < row; ++col) {
            A[row * N + col] = 0.;
        }
    }
    return status;
}
/* Blocked Cholesky A = U^T U, in place; same contract as cholu but uses
 * the BLOCKSIZE-panel recursion (rbcholu) for large matrices. */
int bcholu(double *A, int N) {
    int status,row,col,blk;
    double *corner,*work;
    blk = (int) BLOCKSIZE;
    work = (double*) malloc(sizeof(double) * N * N);
    corner = (double*) malloc(sizeof(double) * blk * blk);
    status = rbcholu(A,N,N,corner,work);
    /* clear the lower triangle left over from the input */
    for (row = 0; row < N; ++row) {
        for (col = 0; col < row; ++col) {
            A[row * N + col] = 0.;
        }
    }
    free(corner);
    free(work);
    return status;
}
/* Cholesky entry point: plain recursion for small matrices, blocked
 * variant above BLOCKSIZE. Returns 0 on success, -1 if not SPD. */
int chol(double *A, int N) {
    return (N <= (int) BLOCKSIZE) ? cholu(A,N) : bcholu(A,N);
}
/* Recursive square-root-free (LDL^T-style) factorisation on the leading
 * N x N block, row stride `stride`. The pivot d1 stays on the diagonal,
 * the first row is scaled by 1/d1, and the trailing block is downdated
 * by d1 * v v^T. U22 is caller scratch ((N-1)^2 doubles).
 * NOTE(review): no zero-pivot guard — d1 == 0 divides by zero; callers
 * are expected to pass positive-definite input. */
static void rchold(double *A,int N, int stride, double *U22) {
    int j,i,u,w;
    double d1;
    if (N == 1) {
        return;
    } else {
        d1 = A[0];
        /* first row of the unit-upper factor: A[0,1:] / d1 */
        for (j = 1; j < N;++j) {
            A[j] /= d1;
        }
        /* trailing update: A22 <- A22 - d1 * v v^T (upper triangle only) */
        mmult(A+1,A+1,U22,N-1,1,N-1);
        scale(U22,N-1,N-1,d1);
        for (i = 0; i < N-1; ++i) {
            u = stride + 1+ i * stride;
            w = i * (N-1);
            for(j = i; j < N-1;j++) {
                A[j + u] -= U22[j + w];
            }
        }
        rchold(A+stride+1,N-1,stride,U22);
    }
}
/* Square-root-free Cholesky driver: factor A in place via rchold and
 * zero out the strictly lower triangle afterwards. */
void chold(double *A, int N) {
    int row,col;
    double *scratch;
    scratch = (double*) malloc(sizeof(double) * N * N);
    rchold(A,N,N,scratch);
    free(scratch);
    for (row = 0; row < N; ++row) {
        for (col = 0; col < row; ++col) {
            A[row * N + col] = 0.;
        }
    }
}
/* Sort the singular values q (descending via sort1d) and permute the
 * columns of U (M x N) and V (N x N) to match. */
void svd_sort(double *U,int M,int N,double *V,double *q) {
    /*
     * Pavel Sakov's CSA SVD sort routine is used with some minor
     * modifications. See The License below
     */
     /*
      * Copyright (C) 2000-2008 Pavel Sakov and CSIRO

    Redistribution and use of material from the package `csa', with or without
    modification, are permitted provided that the following conditions are
    met:

    1. Redistributions of material must retain the above copyright notice, this
    list of conditions and the following disclaimer.
    2. The names of the authors may not be used to endorse or promote products
    derived from this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
    WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
    EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
    OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
    IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
    OF SUCH DAMAGE.
    */
    int i,j;
    double *UT,*VT,*qq;
    int *pos;
    UT = (double*) malloc(sizeof(double) * N * M);
    VT = (double*) malloc(sizeof(double) * N * N);
    qq = (double*) malloc(sizeof(double) * N);
    pos = (int*) malloc(sizeof(int) * N);
    /* snapshot q, U, V before permuting */
    for(i = 0;i < N;++i) {
        qq[i] = q[i];
    }
    for(i = 0;i < M*N;++i) {
        UT[i] = U[i];
    }
    for(i = 0;i < N*N;++i) {
        VT[i] = V[i];
    }
    /* pos[i] = index in the original q of the i-th sorted value */
    sort1d(q,N,pos);
    for(i = 0; i < N;++i) {
        q[i] = qq[pos[i]];
        /* move column pos[i] of the snapshots into column i */
        for (j = 0; j < M;++j) {
            U[j*N+i] = UT[j*N+pos[i]];
        }
        for (j = 0; j < N;++j) {
            V[j*N+i] = VT[j*N+pos[i]];
        }
    }
    free(UT);
    free(VT);
    free(qq);
    free(pos);
}
/* Golub–Reinsch SVD: A (M x N, M >= N) = U * diag(q) * V^T.
 * Phase 1: Householder bidiagonalisation (diagonal in q, superdiagonal in e).
 * Phase 2: accumulate the right (V) and left (U) transformations.
 * Phase 3: implicit-shift QR iteration on the bidiagonal form.
 * Singular values are finally sorted descending by svd_sort. */
int svd(double *A,int M,int N,double *U,double *V,double *q) {
    int i,j,k,l,t,t2,ierr,cancel,iter,l1;
    double eps,g,x,s,temp,f,h,c,y,z,scale;
    double *e;
    /*
    THIS SUBROUTINE IS THE MODIFIED C TRANSLATION OF THE
    EISPACK FORTRAN TRANSLATION OF THE ALGOL PROCEDURE SVD,
    NUM. MATH. 14, 403-420(1970) BY GOLUB AND REINSCH.
    HANDBOOK FOR AUTO. COMP., VOL II-LINEAR ALGEBRA, 134-151(1971).
    */
    /*
     * U = MXN
     * V - NXN
     * Q - NX1
     */
    /*
     * The program return error codes
     *
     * Code 0 if the computation is successful
     * Code -1 If M < N . Transpose the matrix such that rows > columns and trye again
     * Code 15 if maximum iterations are reached without achieving convergence. Increase SVDMAXITER value
     * in matrix.h header file. Default Value is 50
     *
     */
    if (M < N) {
        printf("Rows (M) should be greater than Columns (B) \n");
        printf("Retry By Transposing the Input Matrix");
        return -1;
    }
    e = (double*) malloc(sizeof(double) * N);
    ierr = 0;
    eps = macheps();
    g = scale = x = 0.0;
    for(i = 0; i < M*N;++i) {
        U[i] = A[i];
    }
    /* Householder bidiagonalisation: alternate column and row reflectors */
    for(i = 0; i < N;++i) {
        l = i+1;
        e[i] = scale * g;
        g = 0.0;
        s = 0.0;
        scale = 0.0;
        if (i < M) {
            /* column reflector on U[i:M, i] (scaled for robustness) */
            for(k = i; k < M;++k) {
                scale += fabs(U[k*N+i]);
            }
            if (scale != 0.0) {
                for(k = i; k < M;++k) {
                    t = k * N;
                    U[t+i] /= scale;
                    temp = U[t+i];
                    s += temp*temp;
                }
                f = U[i*N+i];
                g = (f < 0) ? sqrt(s) : -sqrt(s);
                h = f * g - s;
                U[i*N+i] = f - g;
                if (i < N - 1) {
                    /* apply the reflector to the remaining columns */
                    for(j = l; j < N;++j) {
                        s = 0.0;
                        for(k = i; k < M;++k) {
                            t = k * N;
                            s += U[t+i]*U[t+j];
                        }
                        f = s / h;
                        for(k = i; k < M;++k) {
                            t = k * N;
                            U[t+j] += f * U[t+i];
                        }
                    }
                }
                for(k = i; k < M;++k) {
                    t = k * N;
                    U[t+i] *= scale;
                }
            }
        }
        q[i] = scale * g;
        g = 0.0;
        s = 0.0;
        scale = 0.0;
        if (i < M && i != N - 1) {
            /* row reflector on U[i, i+1:N] */
            t = i *N;
            for(k = l; k < M;++k) {
                scale += fabs(U[t+k]);
            }
            if (scale != 0.0) {
                for(k = l; k < N;++k) {
                    U[t+k] /= scale;
                    temp = U[t+k];
                    s = s + temp*temp;
                }
                f = U[t+l];
                g = (f < 0) ? sqrt(s) : -sqrt(s);
                h = f * g - s;
                U[t+l] = f - g;
                for(k = l;k < N;++k) {
                    e[k] = U[t+k] / h;
                }
                for (j = l; j < M; j++) {
                    s = 0.0;
                    t2 = j * N;
                    for (k = l; k < N; k++) {
                        s += U[t2+k] * U[t+k];
                    }
                    for (k = l; k < N; k++) {
                        U[t2+k] += s * e[k];
                    }
                }
                for (k = l; k < N; k++)
                    U[t+k] *= scale;
            }
        }
        /* x tracks the largest |q[i]| + |e[i]| for the convergence tolerance */
        temp = fabs(q[i]) + fabs(e[i]);
        if (x < temp) {
            x = temp;
        }
    }
    /*
    ierr = 0;
    eps = macheps();
    tol = eps;
    g = x = 0.0;
    for(i = 0; i < M*N;++i) {
        U[i] = A[i];
    }
    for(i = 0; i < N;++i) {
        l = i+1;
        e[i] = g;
        s = 0.0;
        for(k = i; k < M;++k) {
            t = k * N;
            temp = U[t+i];
            s += temp*temp;
        }
        if (s < tol) {
            g = 0.0;
        } else {
            f = U[i*N+i];
            g = (f < 0) ? sqrt(s) : -sqrt(s);
            h = f * g - s;
            U[i*N+i] = f - g;
            for(j = l; j < N;++j) {
                s = 0.0;
                for(k = i; k < M;++k) {
                    t = k * N;
                    s += (U[t+i]*U[t+j]);
                }
                f = s / h;
                for(k = i; k < M;++k) {
                    t = k * N;
                    U[t+j] += (f * U[t+i]);
                }
            }
        }
        q[i] = g;
        s = 0.0;
        t = i * N;
        for(k = l; k < N;++k) {
            temp = U[t+k];
            s = s + temp*temp;
        }
        if (s < tol) {
            g = 0.0;
        } else {
            f = U[t+l];
            g = (f < 0) ? sqrt(s) : -sqrt(s);
            h = f * g - s;
            U[t+l] = f - g;
            for(k = l;k < N;++k) {
                e[k] = U[t+k] / h;
            }
            for (j = l; j < M; j++) {
                s = 0.0;
                t2 = j * N;
                for (k = l; k < N; k++) {
                    s += U[t2+k] * U[t+k];
                }
                for (k = l; k < N; k++) {
                    U[t2+k] += s * e[k];
                }
            }
        }
        temp = fabs(q[i]) + fabs(e[i]);
        if (x < temp) {
            x = temp;
        }
    }
    */
    //Accumulating Right Hand Transformations
    for(i = N - 1;i >= 0;--i) {
        t = i * N;
        if (i < N - 1) {
            if (g != 0.0) {
                h = U[t+i+1] * g;
                for(j = l;j < N;++j) {
                    V[j*N+i] = U[t+j] / h;
                }
                for(j = l;j < N;++j) {
                    s = 0.0;
                    for(k = l; k < N;++k) {
                        s += U[t+k] * V[k*N+j];
                    }
                    for(k = l; k < N;++k) {
                        V[k*N+j] += (s * V[k*N+i]);
                    }
                }
            }
            for(j = l; j < N;++j) {
                V[t+j] = V[j*N+i] = 0.0;
            }
        }
        V[t+i] = 1.0;
        g = e[i];
        l = i;
    }
    //Accumulating Left Hand Transformations
    for(i = N - 1;i >= 0;--i) {
        t = i * N;
        l = i+1;
        g = q[i];
        if (i < N - 1) {
            for(j = l;j < N;++j) {
                U[t+j] = 0.0;
            }
        }
        if (g != 0.0) {
            if (i != N - 1) {
                for(j = l;j < N;++j) {
                    s = 0.0;
                    for(k = l; k < M;++k) {
                        s += (U[k*N+i] * U[k*N+j]);
                    }
                    f = (s / U[t+i]) / g;
                    for(k = i; k < M;++k) {
                        U[k*N+j] += (f * U[k*N+i]);
                    }
                }
            }
            for(j = i; j < M;++j) {
                U[j*N+i] = U[j*N+i] / g;
            }
        } else {
            for(j = i; j < M;++j) {
                U[j*N+i] = 0.0;
            }
        }
        U[t+i] += 1.0;
    }
    /* Phase 3: diagonalise the bidiagonal form by implicit-shift QR */
    eps = eps * x;
    for(k = N - 1; k >= 0; --k) {
        iter = 0;
        while(1) {
            iter++;
            if (iter > SVDMAXITER) {
                printf("Convergence Not Achieved \n");
                return 15;
            }
            /* split test: look for a negligible e[l] or q[l-1] */
            cancel = 1;
            for(l = k; l >= 0; --l) {
                if (fabs(e[l]) <= eps) {
                    cancel = 0; //test f convergence
                    break;
                }
                if (fabs(q[l-1]) <= eps) {
                    //Cancel
                    break;
                }
            }
            if (cancel) {
                /* cancellation of e[l] via Givens rotations on U */
                c = 0.0;
                s = 1.0;
                l1 = l - 1;
                for(i = l; i <= k;++i) {
                    f = s*e[i];
                    e[i] *= c;
                    if (fabs(f) <= eps) {
                        break;
                    }
                    g = q[i];
                    h = q[i] = hypot(f,g);
                    c = g/h;
                    s = -f/h;
                    for(j = 0; j < M;++j) {
                        t = j * N;
                        y = U[t+l1];
                        z = U[t+i];
                        U[t+l1] = y * c + z * s;
                        U[t+i] = z * c - y * s;
                    }
                }
            }
            z = q[k];
            if (l != k) {
                /* Wilkinson-style shift from the trailing 2x2 */
                x = q[l];
                y = q[k-1];
                g = e[k-1];
                h = e[k];
                f = 0.5 * (((g + z) / h) * ((g - z) / y) + y / h - h / y);
                g = hypot(f,1.0);
                if (f < 0.0) {
                    temp = f - g;
                } else {
                    temp = f+g;
                }
                f = x - (z / x) * z + (h / x) * (y / temp - h);
                //Next QR Transformation
                c = s = 1.0;
                for(i = l+1; i <= k;++i) {
                    g = e[i];
                    y = q[i];
                    h = s * g;
                    g = c * g;
                    e[i-1] = z = hypot(f,h);
                    c = f / z;
                    s = h / z;
                    f = x * c + g * s;
                    g = g * c - x * s;
                    h = y * s;
                    y *= c;
                    for(j = 0; j < N;++j) {
                        t = j * N;
                        x = V[t+i-1];
                        z = V[t+i];
                        V[t+i-1] = x * c + z * s;
                        V[t+i] = z * c - x * s;
                    }
                    q[i-1] = z = hypot(f,h);
                    if (z != 0.0) {
                        c = f / z;
                        s = h / z;
                    }
                    f = c * g + s * y;
                    x = c * y - s * g;
                    for(j = 0; j < M;++j) {
                        t = j * N;
                        y = U[t+i-1];
                        z = U[t+i];
                        U[t+i-1] = y * c + z * s;
                        U[t+i] = z * c - y * s;
                    }
                }
                e[l] = 0.0;
                e[k] = f;
                q[k] = x;
            } else {
                //convergence
                if (z < 0.0) {
                    /* make the singular value non-negative */
                    q[k] = -z;
                    for (j = 0; j < N; j++) {
                        t = j *N;
                        V[t+k] = -V[t+k];
                    }
                }
                break;
            }
        }
    }
    svd_sort(U,M,N,V,q);
    free(e);
    return ierr;
}
/* Numerical rank of A (M x N; the public wrapper `rank` guarantees
 * M >= N): number of singular values above
 * tol = sigma_max * max(M, N) * macheps().
 * Returns -1 if the SVD fails.
 * FIX: the work buffers are now released on the error path too (they were
 * leaked), and qmax is read only after the SVD is known to have
 * succeeded. */
static int rank_c(double *A, int M,int N) {
    int i,rnk,ret;
    double eps,tol,szmax,qmax;
    double *U,*V,*q;
    U = (double*) malloc(sizeof(double) * M*N);
    V = (double*) malloc(sizeof(double) * N*N);
    q = (double*) malloc(sizeof(double) * N);
    eps = macheps();
    rnk = 0;
    if (M < N) {
        szmax = (double) N;
    } else {
        szmax = (double) M;
    }
    ret = svd(A,M,N,U,V,q);
    if ( ret != 0) {
        printf("Failed to Compute SVD");
        free(U);
        free(V);
        free(q);
        return -1;
    }
    qmax = q[0];   /* singular values come back sorted descending */
    tol = qmax*szmax *eps;
    for(i = 0; i < N;++i) {
        if (q[i] > tol) {
            rnk++;
        }
    }
    free(U);
    free(V);
    free(q);
    return rnk;
}
/* Numerical rank of the M x N matrix A. rank_c requires rows >= cols, so
 * a wide matrix is transposed first (rank(A) == rank(A^T)).
 * FIX: the transpose buffer is only allocated when it is actually needed
 * (it was previously allocated on every call). */
int rank(double *A, int M,int N) {
    int rnk;
    double *AT;
    if (M < N) {
        AT = (double*) malloc(sizeof(double) * M*N);
        mtranspose(A,M,N,AT);
        rnk = rank_c(AT,N,M);
        free(AT);
    } else {
        rnk = rank_c(A,M,N);
    }
    return rnk;
}
|
convolutiondepthwise_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, int8 input with int32 accumulation.
// Each output channel p convolves its own input channel with its own 3x3
// kernel (kernel + p * 9); channels are processed in parallel.
static void convdw3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        out.fill(0);

        const signed char *k0 = (const signed char *)kernel + p * 9;

        int *outptr = out;

        const signed char *img0 = bottom_blob.channel(p);
        // three sliding input rows feeding one output row
        const signed char *row0 = img0;
        const signed char *row1 = img0 + w;
        const signed char *row2 = img0 + w * 2;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                int acc = (int)row0[0] * (int)k0[0] + (int)row0[1] * (int)k0[1] + (int)row0[2] * (int)k0[2]
                        + (int)row1[0] * (int)k0[3] + (int)row1[1] * (int)k0[4] + (int)row1[2] * (int)k0[5]
                        + (int)row2[0] * (int)k0[6] + (int)row2[1] * (int)k0[7] + (int)row2[2] * (int)k0[8];

                *outptr += acc;
                outptr++;

                row0++;
                row1++;
                row2++;
            }
            // skip the two rightmost input columns to reach the next row start
            row0 += 2;
            row1 += 2;
            row2 += 2;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, int8 input with int32 accumulation.
// Same layout as the stride-1 variant, but the input pointers advance by
// two per output column and by `tailstep` between output rows.
static void convdw3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // remaining input columns plus one full extra row (stride 2 vertically)
    const int tailstep = w - 2 * outw + w;
    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        out.fill(0);

        const signed char *k0 = (const signed char *)kernel + p * 9;

        int *outptr = out;

        const signed char *img0 = bottom_blob.channel(p);
        const signed char *row0 = img0;
        const signed char *row1 = img0 + w;
        const signed char *row2 = img0 + w * 2;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                int acc = (int)row0[0] * (int)k0[0] + (int)row0[1] * (int)k0[1] + (int)row0[2] * (int)k0[2]
                        + (int)row1[0] * (int)k0[3] + (int)row1[1] * (int)k0[4] + (int)row1[2] * (int)k0[5]
                        + (int)row2[0] * (int)k0[6] + (int)row2[1] * (int)k0[7] + (int)row2[2] * (int)k0[8];

                *outptr += acc;
                outptr++;

                row0 += 2;
                row1 += 2;
                row2 += 2;
            }
            row0 += tailstep;
            row1 += tailstep;
            row2 += tailstep;
        }
    }
}
|
task_codegen.c | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
typedef void *omp_depend_t;
typedef __UINTPTR_TYPE__ omp_event_handle_t;
void foo();
// CHECK-LABEL: @main
// Driver under test: creates one detachable task carrying a plain 'in'
// dependency plus two depobj dependency lists; the CHECK lines below pin the
// runtime calls the compiler must emit (task allocation, completion event,
// merging of all dependencies into a single kmp_depend_info VLA).
int main() {
omp_depend_t d, x;
omp_event_handle_t evt;
int a;
// CHECK: [[D_ADDR:%.+]] = alloca i8*,
// CHECK: [[X_ADDR:%.+]] = alloca i8*,
// CHECK: [[EVT_ADDR:%.+]] = alloca i64,
// CHECK: [[A_ADDR:%.+]] = alloca i32,
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(
// CHECK: [[ALLOC:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 [[GTID]], i32 65, i64 48, i64 0, i32 (i32, i8*)* bitcast (i32 (i32, [[PRIVATES_TY:%.+]]*)* [[TASK_ENTRY:@.+]] to i32 (i32, i8*)*))
// CHECK: [[EVT_VAL:%.+]] = call i8* @__kmpc_task_allow_completion_event(%struct.ident_t* @{{.+}}, i32 [[GTID]], i8* [[ALLOC]])
// CHECK: [[CAST_EVT_VAL:%.+]] = ptrtoint i8* [[EVT_VAL]] to i64
// CHECK: store i64 [[CAST_EVT_VAL]], i64* [[EVT_ADDR]],
// CHECK: [[DATA:%.+]] = bitcast i8* [[ALLOC]] to [[PRIVATES_TY]]*
// CHECK: [[D:%.+]] = load i8*, i8** [[D_ADDR]],
// CHECK: [[D_DEP:%.+]] = bitcast i8* [[D]] to %struct.kmp_depend_info*
// CHECK: [[D_DEP_BASE:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[D_DEP]], i{{.+}} -1
// CHECK: [[D_DEP_BASE_SIZE:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[D_DEP_BASE]], i{{.+}} 0, i{{.+}} 0
// CHECK: [[SIZE1:%.+]] = load i64, i64* [[D_DEP_BASE_SIZE]],
// CHECK: [[SIZE:%.+]] = add nuw i64 0, [[SIZE1]]
// CHECK: [[X:%.+]] = load i8*, i8** [[X_ADDR]],
// CHECK: [[X_DEP:%.+]] = bitcast i8* [[X]] to %struct.kmp_depend_info*
// CHECK: [[X_DEP_BASE:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[X_DEP]], i{{.+}} -1
// CHECK: [[X_DEP_BASE_SIZE:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[X_DEP_BASE]], i{{.+}} 0, i{{.+}} 0
// CHECK: [[SIZE2:%.+]] = load i64, i64* [[X_DEP_BASE_SIZE]],
// CHECK: [[SIZE3:%.+]] = add nuw i64 [[SIZE]], [[SIZE2]]
// CHECK: [[SIZE:%.+]] = add nuw i64 [[SIZE3]], 1
// CHECK: [[SIZE32:%.+]] = trunc i64 [[SIZE]] to i32
// CHECK: [[SIZE64:%.+]] = zext i32 [[SIZE32]] to i64
// CHECK: [[SV:%.+]] = call i8* @llvm.stacksave()
// CHECK: store i8* [[SV]], i8** [[SV_ADDR:%.+]],
// CHECK: [[VLA:%.+]] = alloca %struct.kmp_depend_info, i64 [[SIZE64]],
// CHECK: [[VLA0:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA]], i64 0
// CHECK: [[BASE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA0]], i{{.+}} 0, i{{.+}} 0
// CHECK: [[A_ADDR_CAST:%.+]] = ptrtoint i32* [[A_ADDR]] to i64
// CHECK: store i64 [[A_ADDR_CAST]], i64* [[BASE_ADDR]],
// CHECK: [[SIZE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA0]], i{{.+}} 0, i{{.+}} 1
// CHECK: store i64 4, i64* [[SIZE_ADDR]],
// CHECK: [[FLAGS_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA0]], i{{.+}} 0, i{{.+}} 2
// CHECK: store i8 1, i8* [[FLAGS_ADDR]],
// CHECK: [[VLA_D:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA]], i64 1
// CHECK: [[D_SIZE:%.+]] = mul nuw i64 24, [[SIZE1]]
// CHECK: [[DEST:%.+]] = bitcast %struct.kmp_depend_info* [[VLA_D]] to i8*
// CHECK: [[SRC:%.+]] = bitcast %struct.kmp_depend_info* [[D_DEP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i64 [[D_SIZE]], i1 false)
// CHECK: [[VLA_X:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA_D]], i64 [[SIZE1]]
// CHECK: [[X_SIZE:%.+]] = mul nuw i64 24, [[SIZE2]]
// CHECK: [[DEST:%.+]] = bitcast %struct.kmp_depend_info* [[VLA_X]] to i8*
// CHECK: [[SRC:%.+]] = bitcast %struct.kmp_depend_info* [[X_DEP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i64 [[X_SIZE]], i1 false)
// CHECK: [[BC:%.+]] = bitcast %struct.kmp_depend_info* [[VLA]] to i8*
// CHECK: call i32 @__kmpc_omp_task_with_deps(%struct.ident_t* @{{.+}}, i32 [[GTID]], i8* [[ALLOC]], i32 [[SIZE32]], i8* [[BC]], i32 0, i8* null)
// CHECK: [[SV:%.+]] = load i8*, i8** [[SV_ADDR]],
// CHECK: call void @llvm.stackrestore(i8* [[SV]])
// The detachable task itself contains a nested taskgroup with an inner task;
// the CHECK lines after main() verify the outlined task entry's codegen.
#pragma omp task depend(in: a) depend(depobj: d, x) detach(evt)
{
#pragma omp taskgroup
{
#pragma omp task
foo();
}
}
// CHECK: ret i32 0
return 0;
}
// CHECK: call void @__kmpc_taskgroup(
// CHECK: call i8* @__kmpc_omp_task_alloc(
// CHECK: call i32 @__kmpc_omp_task(
// CHECK: call void @__kmpc_end_taskgroup(
// CHECK-LABEL: @bar
void bar() {
// The worksharing loop's iteration variable 'i' is copied into the inner
// task's privates struct (verified by the CHECK lines below), so the task's
// ++i mutates the private copy, not the loop counter.
// CHECK: call void @__kmpc_for_static_init_4(
#pragma omp for
for (int i = 0; i < 10; ++i)
// CHECK: [[BUF:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 %{{.+}}, i32 1, i64 48,
// CHECK: [[BC_BUF:%.+]] = bitcast i8* [[BUF]] to [[TT_WITH_PRIVS:%.+]]*
// CHECK: [[PRIVS:%.+]] = getelementptr inbounds [[TT_WITH_PRIVS]], [[TT_WITH_PRIVS]]* [[BC_BUF]], i32 0, i32 1
// CHECK: [[I_PRIV:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}} [[PRIVS]], i32 0, i32 0
// CHECK: store i32 %{{.+}}, i32* [[I_PRIV]],
// CHECK: = call i32 @__kmpc_omp_task(%struct.ident_t* @{{.+}}, i32 %{{.+}}, i8* [[BUF]])
#pragma omp task
++i;
}
#endif
|
28_omp_softcounter_stack.c | // clang-format off
// RUN: %run %s -o -O0 --omp -typeart-filter-pointer-alloca=false 2>&1 | %filecheck %s --check-prefix=CHECK-TSAN
// RUN: %run %s -o -O0 --omp -typeart-filter-pointer-alloca=false 2>&1 | %filecheck %s
// REQUIRES: openmp && softcounter
// clang-format on
#include <stdlib.h>
// Allocates two fixed-size stack arrays per call.  They are deliberately
// unused: only their (de)allocation is tracked by the TypeART
// instrumentation, feeding the softcounter totals checked in main().
void foo() {
double d[32]; // 32 doubles per call
float f[32]; // 32 floats per call
}
// Calls foo() n times in each of two parallel sections (2 threads), so the
// per-thread stack-allocation counters checked in main() are symmetric.
void ptr(const int n) {
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
for (int i = 1; i <= n; i++) {
foo();
}
#pragma omp section
for (int i = 1; i <= n; i++) {
foo();
}
}
}
// Drives ptr() and relies on the TypeART runtime printing its softcounter
// summary at exit; the CHECK lines pin the expected counters
// (2 sections x 100 foo() calls => 200 double[32] + 200 float[32] arrays).
int main(int argc, char** argv) {
const int n = 100;
ptr(n);
// CHECK-TSAN-NOT: ThreadSanitizer
// CHECK: [Trace] TypeART Runtime Trace
// TODO: CHECK-NOT: [Error] - Currently we get 'Pointer already in map' followed by 'Free on unregistered address'
// CHECK: Alloc Stats from softcounters
// CHECK-NEXT: Total heap : 0 , 0 , -
// CHECK: Total stack : {{[0-9]+}} , 400 , -
// CHECK-NEXT: Total global : {{[0-9]+}} , {{[0-9]+}} , -
// CHECK-NEXT: Max. Heap Allocs : 0 , - , -
// CHECK-NEXT: Max. Stack Allocs : 17 , - , -
// CHECK-NEXT: Addresses checked : 0 , - , -
// CHECK-NEXT: Distinct Addresses checked : 0 , - , -
// CHECK-NEXT: Addresses re-used : 2 , - , -
// CHECK-NEXT: Addresses missed : 0 , - , -
// CHECK-NEXT: Distinct Addresses missed : 0 , - , -
// CHECK-NEXT: Total free heap : 0 , 0 , -
// CHECK-NEXT: Total free stack : 423 , 400 , -
// CHECK-NEXT: OMP Stack/Heap/Free : {{[0-9]+}} , 0 , 0
// CHECK-NEXT: Null/Zero/NullZero Addr : 0 , 0 , 0
// CHECK-NEXT: User-def. types : 0 , - , -
// CHECK-NEXT: Estimated memory use (KiB) : {{[0-9]+}} , - , -
// CHECK-NEXT: Bytes per node map/stack : 96 , 8 , -
// CHECK-NEXT: {{(#|-)+}}
// CHECK-NEXT: Allocation type detail (heap, stack, global)
// CHECK: {{(#|-)+}}
// CHECK-NEXT: Free allocation type detail (heap, stack)
// CHECK: 5 : 0 , 200 , float
// CHECK: 6 : 0 , 200 , double
// CHECK: Per-thread counter values (2 threads)
// CHECK-NEXT: Thread Heap Allocs : 0 , 0
// CHECK-NEXT: Thread Heap Arrays : 0 , 0
// CHECK-NEXT: Thread Heap Allocs Free : 0 , 0
// CHECK-NEXT: Thread Heap Arrays Free : 0 , 0
// CHECK-NEXT: Thread Stack Allocs : {{[0-9]+}} , {{[0-9]+}}
// CHECK-NEXT: Thread Stack Arrays : 200 , 200
// CHECK-NEXT: Thread Max. Stack Allocs : {{[0-9]+}} , {{[0-9]+}}
// CHECK-NEXT: Thread Stack Allocs Free : {{[0-9]+}} , {{[0-9]+}}
// CHECK-NEXT: Thread Stack Array Free : 200 , 200
return 0;
}
rawMD4_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2010 by Solar Designer
* Copyright (c) 2011, 2012 by magnum
*
* Use of Bartavelle's mmx/sse2/intrinsics and reduced binary size by
* magnum in 2011-2012.
*
* OMP added May 2013, JimF
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawMD4;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawMD4);
#else
#include <string.h>
#include "arch.h"
#include "md4.h"
#include "common.h"
#include "formats.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#ifdef MMX_COEF
#define OMP_SCALE 1024
#else
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "sse-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-MD4"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "MD4 " MD4_ALGORITHM_NAME
#ifdef MMX_COEF
# define NBKEYS (MMX_COEF * MD4_SSE_PARA)
# define DO_MMX_MD4(in,out,n) SSEmd4body(in, (unsigned int*)out, NULL, SSEi_MIXED_IN)
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifndef MD4_BUF_SIZ
#define MD4_BUF_SIZ 16
#endif
#define CIPHERTEXT_LENGTH 32
#define DIGEST_SIZE 16
#define BINARY_SIZE 16 // source()
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define FORMAT_TAG "$MD4$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
/* Self-test / benchmark vectors: 32-hex-digit raw MD4 digests, listed both
 * with and without the "$MD4$" tag (split() canonicalizes untagged input). */
static struct fmt_tests tests[] = {
{"8a9d093f14f8701df17732b2bb182c74", "password"},
{FORMAT_TAG "6d78785c44ea8dfa178748b245d8c3ae", "magnum" },
{"6d78785c44ea8dfa178748b245d8c3ae", "magnum" },
{FORMAT_TAG "31d6cfe0d16ae931b73c59d7e0c089c0", "" },
{FORMAT_TAG "934eb897904769085af8101ad9dabca2", "John the ripper" },
{FORMAT_TAG "cafbb81fb64d9dd286bc851c4c6e0d21", "lolcode" },
{FORMAT_TAG "585028aa0f794af812ee3be8804eb14a", "123456" },
{FORMAT_TAG "23580e2a459f7ea40f9efa148b63cafb", "12345" },
{FORMAT_TAG "2ae523785d0caf4d2fb557c12016185c", "123456789" },
{FORMAT_TAG "f3e80e83b29b778bc092bf8a7c6907fe", "iloveyou" },
{FORMAT_TAG "4d10a268a303379f224d8852f2d13f11", "princess" },
{FORMAT_TAG "bf75555ca19051f694224f2f5e0b219d", "1234567" },
{FORMAT_TAG "41f92cf74e3d2c3ba79183629a929915", "rockyou" },
{FORMAT_TAG "012d73e0fab8d26e0f4d65e36077511e", "12345678" },
{FORMAT_TAG "0ceb1fd260c35bd50005341532748de6", "abc123" },
{NULL}
};
#ifdef MMX_COEF
#define PLAINTEXT_LENGTH 55
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*MD4_BUF_SIZ*4*MMX_COEF )
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef MMX_COEF
static ARCH_WORD_32 (*saved_key)[MD4_BUF_SIZ*NBKEYS];
static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
#else
static int (*saved_key_length);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[4];
#endif
/* One-time format setup: scale the per-crypt key count by the OpenMP thread
 * count (times OMP_SCALE so each thread gets a large chunk per crypt_all()),
 * then allocate the key/digest buffers -- SIMD-aligned and grouped in
 * NBKEYS-lane bundles when MMX/SSE intrinsics are enabled. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
omp_t *= OMP_SCALE; /* oversubscribe to amortize threading overhead */
self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif
#ifndef MMX_COEF
saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
#else
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt/NBKEYS, MEM_ALIGN_SIMD);
crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt/NBKEYS, MEM_ALIGN_SIMD);
#endif
}
/* Accept a hash iff it is exactly CIPHERTEXT_LENGTH lowercase hex digits,
 * optionally preceded by the "$MD4$" tag.  Uppercase digits are rejected
 * so that every hash has a single canonical spelling. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *start = ciphertext;
	char *cur;

	if (strncmp(start, FORMAT_TAG, TAG_LENGTH) == 0)
		start += TAG_LENGTH;

	for (cur = start; atoi16[ARCH_INDEX(*cur)] != 0x7F; cur++) {
		if (*cur >= 'A' && *cur <= 'F') /* lowercase hex only */
			return 0;
	}
	return *cur == 0 && cur - start == CIPHERTEXT_LENGTH;
}
/* Canonicalize a hash to tagged form.  Already-tagged input is returned
 * unchanged; untagged input is copied into a static buffer behind the
 * "$MD4$" tag.  valid() has already guaranteed CIPHERTEXT_LENGTH hex
 * digits followed by a NUL, so a fixed-size copy is safe. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) == 0)
		return ciphertext;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1); /* incl. NUL */
	return out;
}
/* Decode the 32 hex digits following the tag into a 16-byte binary digest.
 * The result lives in a lazily-allocated static buffer that is reused on
 * every call (the caller must consume it before the next invocation). */
static void *binary(char *ciphertext)
{
	static unsigned char *out;
	char *hex;
	int i;

	if (!out)
		out = mem_alloc_tiny(DIGEST_SIZE, MEM_ALIGN_WORD);

	hex = ciphertext + TAG_LENGTH;
	for (i = 0; i < DIGEST_SIZE; i++, hex += 2)
		out[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		         atoi16[ARCH_INDEX(hex[1])];

	return out;
}
/* Hash-table bucket extractors: return the low 4..27 bits of the first
 * 32-bit word of a computed digest.  The SIMD variant first locates the
 * lane for 'index' inside the interleaved crypt_key layout (HASH_OFFSET). */
#ifdef MMX_COEF
#define HASH_OFFSET (index&(MMX_COEF-1))+((index%NBKEYS)/MMX_COEF)*MMX_COEF*4
static int get_hash_0(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xf; }
static int get_hash_1(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xff; }
static int get_hash_2(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xfff; }
static int get_hash_3(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xffff; }
static int get_hash_4(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xfffff; }
static int get_hash_5(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xffffff; }
static int get_hash_6(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0x7ffffff; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_key[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_key[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_key[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_key[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_key[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_key[index][0] & 0x7ffffff; }
#endif
/* Intentionally empty: set_key() fully overwrites each slot it uses (and
 * zeroes trailing words in the SIMD layout), so no per-batch cleanup is
 * needed. */
static void clear_keys(void)
{
}
#ifdef MMX_COEF
/* Pack a key into the interleaved SSE buffer, one 32-bit word at a time.
 * As soon as a NUL byte is seen inside a word, the MD4 0x80 padding byte is
 * appended right after the last key byte; the loop after 'key_cleaning'
 * then zeroes any leftover words from a previous, longer key in this lane.
 * NOTE(review): reads the key in 4-byte words, so it assumes the input
 * buffer is readable (and reasonably aligned) up to the next word past the
 * terminator -- the JtR core guarantees this; confirm for other callers. */
static void set_key(char *_key, int index)
{
const ARCH_WORD_32 *key = (ARCH_WORD_32*)_key;
ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32*)saved_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*MD4_BUF_SIZ*MMX_COEF];
ARCH_WORD_32 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_32 temp;
len = 0;
while((temp = *key++) & 0xff) {
if (!(temp & 0xff00))
{
*keybuf_word = (temp & 0xff) | (0x80 << 8); /* pad after 1 byte */
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = (temp & 0xffff) | (0x80 << 16); /* pad after 2 bytes */
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = temp | (0x80 << 24); /* pad after 3 bytes */
len+=3;
goto key_cleaning;
}
*keybuf_word = temp;
len += 4;
keybuf_word += MMX_COEF; /* next word of this lane (interleaved stride) */
}
*keybuf_word = 0x80; /* key length was a multiple of 4: pad in a fresh word */
key_cleaning:
keybuf_word += MMX_COEF;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += MMX_COEF;
}
keybuffer[14*MMX_COEF] = len << 3; /* MD4 length field: bit count in word 14 */
}
#else
/* Scalar path: record length and copy the raw key bytes (get_key() adds the
 * terminator on demand).
 * NOTE(review): no bound check here; relies on the core never passing keys
 * longer than PLAINTEXT_LENGTH -- confirm. */
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_key_length[index] = len;
memcpy(saved_key[index], key, len);
}
#endif
#ifdef MMX_COEF
/* Rebuild the plaintext from the interleaved SSE buffer: recover the byte
 * length from the stored bit count in word 14, then gather the key bytes
 * for this lane via GETPOS.  Returns a static buffer (single-threaded use). */
static char *get_key(int index)
{
static char out[PLAINTEXT_LENGTH + 1];
unsigned int i;
ARCH_WORD_32 len = ((ARCH_WORD_32*)saved_key)[14*MMX_COEF + (index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*MD4_BUF_SIZ*MMX_COEF] >> 3;
for(i=0;i<len;i++)
out[i] = ((char*)saved_key)[GETPOS(i, index)];
out[i] = 0;
return (char*)out;
}
#else
/* Scalar path: NUL-terminate in place (set_key() stored length, not NUL). */
static char *get_key(int index)
{
saved_key[index][saved_key_length[index]] = 0;
return saved_key[index];
}
#endif
/* Compute MD4 over every queued key.  With OpenMP the key space is cut into
 * MAX_KEYS_PER_CRYPT-sized chunks, one per parallel iteration; without
 * OpenMP the braced block runs once for index 0 (params then limit the
 * batch to one scalar key, or one NBKEYS-wide SSE bundle). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
#pragma omp parallel for
for (index = 0; index < loops; ++index)
#endif
{
#if MMX_COEF
/* third macro argument is discarded by DO_MMX_MD4's definition */
DO_MMX_MD4(saved_key[index], crypt_key[index], total_len[index]);
#else
MD4_CTX ctx;
MD4_Init(&ctx);
MD4_Update(&ctx, saved_key[index], saved_key_length[index]);
MD4_Final((unsigned char *)crypt_key[index], &ctx);
#endif
}
return count;
}
/* Quick scan: does any computed digest share its first 32-bit word with the
 * candidate binary?  A full comparison is deferred to cmp_one(). */
static int cmp_all(void *binary, int count) {
int index;
for (index = 0; index < count; index++)
#ifdef MMX_COEF
if (((ARCH_WORD_32 *) binary)[0] == ((ARCH_WORD_32*)crypt_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*4*MMX_COEF])
#else
if ( ((ARCH_WORD_32*)binary)[0] == crypt_key[index][0] )
#endif
return 1;
return 0;
}
/* Full comparison of one computed digest against the candidate binary.
 * The SIMD layout interleaves digest words per lane, hence the strided
 * indexing; the scalar path is a flat memcmp of all BINARY_SIZE bytes. */
static int cmp_one(void *binary, int index)
{
#ifdef MMX_COEF
int i;
for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_32); i++)
if (((ARCH_WORD_32 *) binary)[i] != ((ARCH_WORD_32*)crypt_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*4*MMX_COEF+i*MMX_COEF])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
/* BINARY_SIZE equals DIGEST_SIZE (16), so cmp_one() already compared every
 * byte of the digest; nothing further to verify here. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Reconstruct the canonical tagged ciphertext ("$MD4$" + 32 lowercase hex
 * digits) from a binary digest.  Returns a static buffer reused per call. */
static char *source(char *source, void *binary)
{
	static char Buf[CIPHERTEXT_LENGTH + TAG_LENGTH + 1];
	unsigned char *in = (unsigned char*)binary;
	char *out = Buf;
	int i;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	out += TAG_LENGTH;
	for (i = 0; i < BINARY_SIZE; i++) {
		unsigned char b = in[i];
		*out++ = itoa16[b >> 4];
		*out++ = itoa16[b & 0xF];
	}
	*out = 0;
	return Buf;
}
/* Format descriptor registered with the JtR core: a parameters block
 * followed by the method table wiring up the functions defined above. */
struct fmt_main fmt_rawMD4 = {
{ /* params */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, { /* methods */
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
source,
{ /* binary_hash[] -- unsalted format uses the defaults */
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{ /* get_hash[] */
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__isfinite_bool_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fc32)
// op(A') function: GB (_unop_tran__isfinite_bool_fc32)
// C type: bool
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = GB_cisfinitef (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisfinitef (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = GB_cisfinitef (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = GB_cisfinitef (Ax [p]) over anz entries, parallelized with
// nthreads.  Cx and Ax may alias; a non-NULL Ab bitmap selects live entries.
// (Auto-generated file: comments added in review only -- regenerate rather
// than hand-edit the code.)
GrB_Info GB (_unop_apply__isfinite_bool_fc32)
(
bool *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = GB_cisfinitef (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = GB_cisfinitef (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isfinite (A'): the shared transpose kernel in GB_unop_transpose.c is
// instantiated here via the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__isfinite_bool_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
column_matrix.h | /*!
* Copyright 2017 by Contributors
* \file column_matrix.h
* \brief Utility for fast column-wise access
* \author Philip Cho
*/
#ifndef XGBOOST_COMMON_COLUMN_MATRIX_H_
#define XGBOOST_COMMON_COLUMN_MATRIX_H_
#include <limits>
#include <vector>
#include <memory>
#include "hist_util.h"
namespace xgboost {
namespace common {
class ColumnMatrix;
/*! \brief column type */
enum ColumnType {
kDenseColumn,
kSparseColumn
};
/*! \brief a column storage, to be used with ApplySplit. Note that each
bin id is stored as index[i] + index_base.
Different types of column index for each column allow
to reduce the memory usage. */
template <typename BinIdxType>
class Column {
 public:
  Column(ColumnType type, common::Span<const BinIdxType> index, const uint32_t index_base)
      : type_(type),
        index_(index),
        index_base_(index_base) {}

  // Global bin id = per-feature base offset + feature-local bin id.
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return index_base_ + static_cast<uint32_t>(index_[idx]);
  }

  // Feature-local bin id (relative to index_base_).
  BinIdxType GetFeatureBinIdx(size_t idx) const { return index_[idx]; }

  // Least global bin id of this feature.
  // NOTE: dropped the meaningless top-level const on the by-value return
  // type (-Wignored-qualifiers); callers are unaffected.
  uint32_t GetBaseIdx() const { return index_base_; }

  common::Span<const BinIdxType> GetFeatureBinIdxPtr() const { return index_; }

  ColumnType GetType() const { return type_; }

  /* returns number of elements in column */
  size_t Size() const { return index_.size(); }

 private:
  /* type of column */
  ColumnType type_;
  /* bin indexes in range [0, max_bins - 1] */
  common::Span<const BinIdxType> index_;
  /* bin index offset for specific feature */
  const uint32_t index_base_;
};
/* Sparse column: only present entries are stored; entry k of the bin index
 * belongs to row row_ind_[k]. */
template <typename BinIdxType>
class SparseColumn: public Column<BinIdxType> {
public:
SparseColumn(ColumnType type, common::Span<const BinIdxType> index,
uint32_t index_base, common::Span<const size_t> row_ind)
: Column<BinIdxType>(type, index, index_base),
row_ind_(row_ind) {}
const size_t* GetRowData() const { return row_ind_.data(); }
// Row that owns the idx-th stored entry.
size_t GetRowIdx(size_t idx) const {
return row_ind_.data()[idx];
}
private:
/* indexes of rows */
common::Span<const size_t> row_ind_;
};
/* Dense column: one slot per row.  Missing entries are looked up through a
 * REFERENCE to the owning ColumnMatrix's missing_flags_ vector, so a
 * DenseColumn must not outlive the ColumnMatrix that produced it
 * (NOTE(review): lifetime assumption -- confirm at call sites). */
template <typename BinIdxType>
class DenseColumn: public Column<BinIdxType> {
public:
DenseColumn(ColumnType type, common::Span<const BinIdxType> index,
uint32_t index_base, const std::vector<bool>& missing_flags,
size_t feature_offset)
: Column<BinIdxType>(type, index, index_base),
missing_flags_(missing_flags),
feature_offset_(feature_offset) {}
bool IsMissing(size_t idx) const { return missing_flags_[feature_offset_ + idx]; }
private:
/* flags for missing values in dense columns */
const std::vector<bool>& missing_flags_;
// offset of this feature's slots inside the shared missing_flags_ vector
size_t feature_offset_;
};
/*! \brief a collection of columns, with support for construction from
GHistIndexMatrix. */
class ColumnMatrix {
public:
// get number of features
inline bst_uint GetNumFeature() const {
return static_cast<bst_uint>(type_.size());
}
// construct column matrix from GHistIndexMatrix
inline void Init(const GHistIndexMatrix& gmat,
double sparse_threshold) {
const int32_t nfeature = static_cast<int32_t>(gmat.cut.Ptrs().size() - 1);
const size_t nrow = gmat.row_ptr.size() - 1;
// identify type of each column
feature_counts_.resize(nfeature);
type_.resize(nfeature);
std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
uint32_t max_val = std::numeric_limits<uint32_t>::max();
for (int32_t fid = 0; fid < nfeature; ++fid) {
CHECK_LE(gmat.cut.Ptrs()[fid + 1] - gmat.cut.Ptrs()[fid], max_val);
}
bool all_dense = gmat.IsDense();
gmat.GetFeatureCounts(&feature_counts_[0]);
// classify features
for (int32_t fid = 0; fid < nfeature; ++fid) {
if (static_cast<double>(feature_counts_[fid])
< sparse_threshold * nrow) {
type_[fid] = kSparseColumn;
all_dense = false;
} else {
type_[fid] = kDenseColumn;
}
}
// want to compute storage boundary for each feature
// using variants of prefix sum scan
feature_offsets_.resize(nfeature + 1);
size_t accum_index_ = 0;
feature_offsets_[0] = accum_index_;
for (int32_t fid = 1; fid < nfeature + 1; ++fid) {
if (type_[fid - 1] == kDenseColumn) {
accum_index_ += static_cast<size_t>(nrow);
} else {
accum_index_ += feature_counts_[fid - 1];
}
feature_offsets_[fid] = accum_index_;
}
SetTypeSize(gmat.max_num_bins);
index_.resize(feature_offsets_[nfeature] * bins_type_size_, 0);
if (!all_dense) {
row_ind_.resize(feature_offsets_[nfeature]);
}
// store least bin id for each feature
index_base_ = const_cast<uint32_t*>(gmat.cut.Ptrs().data());
const bool noMissingValues = NoMissingValues(gmat.row_ptr[nrow], nrow, nfeature);
any_missing_ = !noMissingValues;
if (noMissingValues) {
missing_flags_.resize(feature_offsets_[nfeature], false);
} else {
missing_flags_.resize(feature_offsets_[nfeature], true);
}
// pre-fill index_ for dense columns
if (all_dense) {
BinTypeSize gmat_bin_size = gmat.index.GetBinTypeSize();
if (gmat_bin_size == kUint8BinsTypeSize) {
SetIndexAllDense(gmat.index.data<uint8_t>(), gmat, nrow, nfeature, noMissingValues);
} else if (gmat_bin_size == kUint16BinsTypeSize) {
SetIndexAllDense(gmat.index.data<uint16_t>(), gmat, nrow, nfeature, noMissingValues);
} else {
CHECK_EQ(gmat_bin_size, kUint32BinsTypeSize);
SetIndexAllDense(gmat.index.data<uint32_t>(), gmat, nrow, nfeature, noMissingValues);
}
/* For sparse DMatrix gmat.index.getBinTypeSize() returns always kUint32BinsTypeSize
but for ColumnMatrix we still have a chance to reduce the memory consumption */
} else {
if (bins_type_size_ == kUint8BinsTypeSize) {
SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
} else if (bins_type_size_ == kUint16BinsTypeSize) {
SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
} else {
CHECK_EQ(bins_type_size_, kUint32BinsTypeSize);
SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
}
}
}
/* Set the number of bytes based on numeric limit of maximum number of bins provided by user */
void SetTypeSize(size_t max_num_bins) {
if ( (max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint8_t>::max()) ) {
bins_type_size_ = kUint8BinsTypeSize;
} else if ((max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint16_t>::max())) {
bins_type_size_ = kUint16BinsTypeSize;
} else {
bins_type_size_ = kUint32BinsTypeSize;
}
}
/* Fetch an individual column. This code should be used with type swith
to determine type of bin id's */
template <typename BinIdxType>
std::unique_ptr<const Column<BinIdxType> > GetColumn(unsigned fid) const {
CHECK_EQ(sizeof(BinIdxType), bins_type_size_);
const size_t feature_offset = feature_offsets_[fid]; // to get right place for certain feature
const size_t column_size = feature_offsets_[fid + 1] - feature_offset;
common::Span<const BinIdxType> bin_index = { reinterpret_cast<const BinIdxType*>(
&index_[feature_offset * bins_type_size_]),
column_size };
std::unique_ptr<const Column<BinIdxType> > res;
if (type_[fid] == ColumnType::kDenseColumn) {
res.reset(new DenseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
missing_flags_, feature_offset));
} else {
res.reset(new SparseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
{&row_ind_[feature_offset], column_size}));
}
return res;
}
template<typename T>
inline void SetIndexAllDense(T* index, const GHistIndexMatrix& gmat, const size_t nrow,
const size_t nfeature, const bool noMissingValues) {
T* local_index = reinterpret_cast<T*>(&index_[0]);
/* missing values make sense only for column with type kDenseColumn,
and if no missing values were observed it could be handled much faster. */
if (noMissingValues) {
#pragma omp parallel for num_threads(omp_get_max_threads())
for (omp_ulong rid = 0; rid < nrow; ++rid) {
const size_t ibegin = rid*nfeature;
const size_t iend = (rid+1)*nfeature;
size_t j = 0;
for (size_t i = ibegin; i < iend; ++i, ++j) {
const size_t idx = feature_offsets_[j];
local_index[idx + rid] = index[i];
}
}
} else {
/* to handle rows in all batches, sum of all batch sizes equal to gmat.row_ptr.size() - 1 */
size_t rbegin = 0;
for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
const xgboost::Entry* data_ptr = batch.data.HostVector().data();
const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
const size_t batch_size = batch.Size();
CHECK_LT(batch_size, offset_vec.size());
for (size_t rid = 0; rid < batch_size; ++rid) {
const size_t size = offset_vec[rid + 1] - offset_vec[rid];
SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
const size_t ibegin = gmat.row_ptr[rbegin + rid];
const size_t iend = gmat.row_ptr[rbegin + rid + 1];
CHECK_EQ(ibegin + inst.size(), iend);
size_t j = 0;
size_t fid = 0;
for (size_t i = ibegin; i < iend; ++i, ++j) {
fid = inst[j].index;
const size_t idx = feature_offsets_[fid];
/* rbegin allows to store indexes from specific SparsePage batch */
local_index[idx + rbegin + rid] = index[i];
missing_flags_[idx + rbegin + rid] = false;
}
}
rbegin += batch.Size();
}
}
}
template<typename T>
inline void SetIndex(uint32_t* index, const GHistIndexMatrix& gmat,
const size_t nrow, const size_t nfeature) {
std::vector<size_t> num_nonzeros;
num_nonzeros.resize(nfeature);
std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
T* local_index = reinterpret_cast<T*>(&index_[0]);
size_t rbegin = 0;
for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
const xgboost::Entry* data_ptr = batch.data.HostVector().data();
const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
const size_t batch_size = batch.Size();
CHECK_LT(batch_size, offset_vec.size());
for (size_t rid = 0; rid < batch_size; ++rid) {
const size_t ibegin = gmat.row_ptr[rbegin + rid];
const size_t iend = gmat.row_ptr[rbegin + rid + 1];
size_t fid = 0;
const size_t size = offset_vec[rid + 1] - offset_vec[rid];
SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
CHECK_EQ(ibegin + inst.size(), iend);
size_t j = 0;
for (size_t i = ibegin; i < iend; ++i, ++j) {
const uint32_t bin_id = index[i];
fid = inst[j].index;
if (type_[fid] == kDenseColumn) {
T* begin = &local_index[feature_offsets_[fid]];
begin[rid + rbegin] = bin_id - index_base_[fid];
missing_flags_[feature_offsets_[fid] + rid + rbegin] = false;
} else {
T* begin = &local_index[feature_offsets_[fid]];
begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
row_ind_[feature_offsets_[fid] + num_nonzeros[fid]] = rid + rbegin;
++num_nonzeros[fid];
}
}
}
rbegin += batch.Size();
}
}
const BinTypeSize GetTypeSize() const {
return bins_type_size_;
}
// This is just an utility function
const bool NoMissingValues(const size_t n_elements,
const size_t n_row, const size_t n_features) {
return n_elements == n_features * n_row;
}
// And this returns part of state
const bool AnyMissing() const {
return any_missing_;
}
private:
std::vector<uint8_t> index_;
std::vector<size_t> feature_counts_;
std::vector<ColumnType> type_;
std::vector<size_t> row_ind_;
/* indicate where each column's index and row_ind is stored. */
std::vector<size_t> feature_offsets_;
// index_base_[fid]: least bin id for feature fid
uint32_t* index_base_;
std::vector<bool> missing_flags_;
BinTypeSize bins_type_size_;
bool any_missing_;
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_COLUMN_MATRIX_H_
|
sieve.c | /**
* @file sieve.c
* @author Austin Gill (atgill@protonmail.com)
* @brief Implementation of the Sieve of Eratosthenes.
*/
#include "sieve.h"
/**
 * @brief Allocate a sieve of @p length flags, every entry initialized to true.
 *
 * Exits the program with status 1 if the allocation genuinely fails.
 * A zero @p length is not an error: malloc(0) may legally return NULL,
 * so that case returns without touching the buffer.
 *
 * @param length Number of entries in the sieve.
 * @return Pointer to the newly allocated array (caller owns it and must free() it),
 *         or NULL when @p length is 0 and malloc(0) returned NULL.
 */
bool* allocate( size_t length )
{
    bool* a = malloc( length * sizeof( bool ) );
    // Only treat NULL as failure when bytes were actually requested.
    if( a == NULL && length > 0 )
    {
        fprintf( stderr, "Failed to allocate %zu bytes\n", length * sizeof( bool ) );
        exit( 1 );
    }
    // Initialize the sieve with every number marked as prime.
    // (Guarded: memset(NULL, ..., 0) is undefined behavior.)
    if( a != NULL )
    {
        memset( a, true, length );
    }
    return a;
}
/**
 * @brief Cross off composite numbers in-place using the Sieve of
 * Eratosthenes, parallelizing the outer candidate loop with OpenMP.
 *
 * Entries 0 and 1 are never written, so they keep whatever value the
 * caller initialized them with (allocate() marks everything true).
 *
 * @return Elapsed wall-clock time in milliseconds.
 */
double prime_sieve( bool* sieve, size_t length )
{
double begin = omp_get_wtime();
// Use a dynamic schedule because some iterations will take longer than others.
// NOTE(review): different i values can store 'false' to the same sieve[j]
// concurrently.  All racing writes store the same value, but this is still
// formally a data race -- confirm it is benign on the target platform.
#pragma omp parallel for num_threads( omp_get_num_procs() ) schedule( dynamic )
for( size_t i = 2; i < ( size_t )( sqrt( (double)length ) + 1.5 ); ++i ) // +1.5 guards sqrt() rounding low
{
if( sieve[i] )
{
// Mark all multiples of i as composite
for( size_t j = i * i; j < length; j += i ) // start at i*i: smaller multiples already crossed off
{
sieve[j] = false;
}
}
}
double end = omp_get_wtime();
// Convert seconds to milliseconds
return ( end - begin ) * 1000;
}
/**
 * @brief Print every index still flagged true in the sieve, ten per line,
 * each line prefixed with the running count of values printed so far.
 */
void print_primes( const bool* sieve, size_t length )
{
    size_t count = 0;

    for( size_t n = 0; n < length; ++n )
    {
        if( !sieve[n] )
        {
            continue;
        }
        // Start a new labelled row every 10 primes.
        if( count % 10 == 0 )
        {
            printf( "\n%zu:\t", count );
        }
        count++;
        printf( "%zu\t", n );
    }
    printf( "\n" );
}
|
pcs.c | /** @file pcs.c
* @brief Functions relative to the Parallel Collision Search algorithm.
*
* Created by Monika Trimoska on 03/12/2015.
* Copyright © 2015 Monika Trimoska. All rights reserved.
*/
#include <stdio.h>
#include <stdint.h>
#include <math.h>
#include <gmp.h>
#include <omp.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <sys/time.h>
#include "pcs_elliptic_curve_operations.h"
#include "pcs_pollard_rho.h"
#include "pcs_storage.h"
#include "pcs.h"
/* Module-wide PCS context, populated by pcs_init() and released by pcs_clear(). */
elliptic_curve_t E;            /* Curve on which the discrete log is computed. */
point_t P;                     /* Base point. */
point_t Q;                     /* Target point (we search x with Q = xP). */
mpz_t n;                       /* Group order used to reduce coefficients. */
mpz_t *A;                      /* Per-ensemble a-coefficients (owned by the caller). */
mpz_t *B;                      /* Per-ensemble b-coefficients (owned by the caller). */
point_t M[__NB_ENSEMBLES__];   /* Precomputed walk points M[r] = A[r]P + B[r]Q. */
uint8_t trailling_bits;        /* Trailing zero bits that define a distinguished point. */
uint8_t nb_bits;               /* Bit size of the random starting exponents. */
/** Determines whether a point is a distinguished one.
 *
 * A point is distinguished when the `trailling_bits` low-order bits of its
 * x-coordinate are all zero.
 *
 * @param[in] R A point on an elliptic curve.
 * @param[in] trailling_bits Number of trailing zero bits in a distinguished point.
 * @param[out] q The x-coordinate, without the trailing zeros.
 * @return 1 if the point is distinguished, 0 otherwise.
 */
int is_distinguished(point_t R, int trailling_bits, mpz_t *q)
{
    int res;
    mpz_t r;
    mpz_inits(r, NULL);
    /* Fix: compute 2^trailling_bits with an exact integer shift instead of
       pow(), which goes through floating point and loses precision once the
       result exceeds 2^53 (and is needlessly slow on this hot path). */
    mpz_tdiv_qr_ui(*q, r, R.x, 1UL << trailling_bits);
    res = (mpz_sgn(r) == 0);
    mpz_clears(r, NULL);
    return (res);
}
/** Checks if the linear combination aP+bQ is equal to R or its inverse.
 *
 * Recomputes S = aP + bQ from the global base points and compares only the
 * y-coordinates. NOTE(review): this presumes R and S already share the same
 * x-coordinate (so S is either R or -R) — confirm at the call sites.
 *
 * @param[in] R A point on an elliptic curve.
 * @param[in] a a coefficient.
 * @param[in] b b coefficient.
 * @return 1 if aP+bQ = R, 0 if aP+bQ = -R.
 */
int same_point(point_t R, mpz_t a, mpz_t b)
{
int res;
point_t *S1, *S2, *S;
/* Lazily create the per-thread GMP scratch objects used below. */
if(!preallocation_init_done)
{
preallocation_init();
}
/* Per-thread scratch points: avoids mpz allocation on every call. */
S1 = &(temp_point[omp_get_thread_num()][1]);
S2 = &(temp_point[omp_get_thread_num()][2]);
S = &(temp_point[omp_get_thread_num()][3]);
double_and_add(S1, P, a, E);
double_and_add(S2, Q, b, E);
add(S, *S1, *S2, E);
/* Equal y-coordinates means the recomputed point is R itself, not -R. */
res=(mpz_cmp(R.y, S->y) == 0);
return res;
}
/** Computes the linear combination aP+bQ on E.
 *
 * Uses the global base points P and Q and the global curve E.
 *
 * @param[out] R Resulting point (must be initialized by the caller).
 * @param[in] a a coefficient.
 * @param[in] b b coefficient.
 */
void lin_comb(point_t * R, mpz_t a, mpz_t b)
{
point_t *S1, *S2;
/* Lazily create the per-thread GMP scratch objects used below. */
if(!preallocation_init_done)
{
preallocation_init();
}
/* Per-thread scratch points: avoids mpz allocation on every call. */
S1 = &(temp_point[omp_get_thread_num()][1]);
S2 = &(temp_point[omp_get_thread_num()][2]);
double_and_add(S1, P, a, E);
double_and_add(S2, Q, b, E);
add(R, *S1, *S2, E);
}
/** Checks if there is a collision.
 *
 * Replays the two random walks that start from exponents a1 and a2 until each
 * reaches a distinguished point again, accumulating the b-coefficients that
 * the stored trails did not keep. If the final (a,b) pairs differ, solves for
 * the discrete logarithm x. The a1/a2 arguments are updated in place during
 * the replay.
 *
 * @param[out] x   The recovered discrete log, valid only when 1 is returned.
 * @param[in,out] a1  Starting exponent of the first trail.
 * @param[in,out] a2  Starting exponent of the second trail.
 * @param[in] trailling_bits  Distinguished-point criterion.
 * @return 1 if a usable collision was found, 0 otherwise.
 */
int is_collision(mpz_t x, mpz_t a1, mpz_t a2, int trailling_bits)
{
uint8_t r;
mpz_t *xDist_;
int retval = 0;
mpz_t *b1, *b2;
point_t *R;
/* Lazily create the per-thread GMP scratch objects used below. */
if(!preallocation_init_done)
{
preallocation_init();
}
/* Per-thread scratch storage: avoids mpz allocation on every call. */
b1 = &(temp_obj[omp_get_thread_num()][9]);
b2 = &(temp_obj[omp_get_thread_num()][10]);
xDist_ = &(temp_obj[omp_get_thread_num()][11]);
R = &(temp_point[omp_get_thread_num()][4]);
mpz_set_ui(*b2, 0);
mpz_set_ui(*b1, 0);
double_and_add(R, P, a1, E);
//recompute first a,b pair
while(!is_distinguished(*R, trailling_bits, xDist_))
{
r = hash(R->y);
compute_a(a1, A[r], n);
compute_b(*b1, B[r], n);
f(*R, M[r], R, E);
}
//recompute second a,b pair
double_and_add(R, P, a2, E);
while(!is_distinguished(*R, trailling_bits, xDist_))
{
r = hash(R->y);
compute_a(a2, A[r], n);
compute_b(*b2, B[r], n);
f(*R, M[r], R, E);
}
if(mpz_cmp(*b1, *b2) != 0) //we found two different pairs, so collision
{
if(!same_point(*R, a1, *b1)) //it's the inverse point
{
/* The two trails ended on inverse points: negate the second pair
   (mod n) so the collision equation still holds. */
mpz_neg(a2, a2);
mpz_mmod(a2, a2, n);
mpz_neg(*b2, *b2);
mpz_mmod(*b2, *b2, n);
}
compute_x(x, a1, a2, *b1, *b2, n);
retval = 1;
}
return retval;
}
/** Initialize all variables needed to do a PCS algorithm.
 *
 * Copies the curve, base points and group order into module globals, keeps
 * (does not copy) the caller's A/B coefficient arrays, precomputes the
 * random-walk points M[i] = A[i]P + B[i]Q, and initializes the storage
 * structure for distinguished points.
 */
void pcs_init(point_t P_init, point_t Q_init, elliptic_curve_t E_init, mpz_t n_init, mpz_t *A_init, mpz_t *B_init, uint8_t nb_bits_init, uint8_t trailling_bits_init, int type_struct, int nb_threads, uint8_t level)
{
uint8_t i;
point_init(&P);
point_init(&Q);
curve_init(&E);
mpz_init(n);
mpz_set(P.x, P_init.x);
mpz_set(P.y, P_init.y);
mpz_set(P.z, P_init.z);
mpz_set(Q.x, Q_init.x);
mpz_set(Q.y, Q_init.y);
mpz_set(Q.z, Q_init.z);
mpz_set(E.A, E_init.A);
mpz_set(E.B, E_init.B);
mpz_set(E.p, E_init.p);
mpz_set(n, n_init);
/* A and B are borrowed, not copied: the caller must keep them alive until
   pcs_clear() is called. */
A = A_init;
B = B_init;
for(i=0; i<__NB_ENSEMBLES__; i++)
{
mpz_inits(M[i].x,M[i].y,M[i].z,NULL);
lin_comb(&M[i],A[i],B[i]);
}
trailling_bits = trailling_bits_init;
nb_bits = nb_bits_init;
struct_init(type_struct, n, trailling_bits, nb_bits, nb_threads, level);
}
/** Run the PCS algorithm.
 *
 * Each thread performs independent random walks R -> f(R) until it reaches a
 * distinguished point, stores it (struct_add), and on a stored-point match
 * tries to derive the discrete log via is_collision(). The result of the last
 * collision found is left in x_res.
 *
 * NOTE(review): collision_count is incremented inside the critical section
 * but read unsynchronized in the while condition; with nb_collisions > 1 the
 * final count may slightly overshoot — confirm this is acceptable.
 *
 * @param[out] x_res  Recovered discrete log (from the last collision).
 * @param[in] nb_threads  Number of OpenMP threads to use.
 * @param[in] nb_collisions  Number of collisions to accumulate before stopping.
 * @return Always 0 in this version.
 */
long long int pcs_run(mpz_t x_res, int nb_threads, int nb_collisions)
{
point_t R;
mpz_t a, a2;
mpz_t x, xDist;
uint8_t r;
int trail_length;
/* Abandon a walk after ~20x the expected trail length (2^trailling_bits). */
int trail_length_max = pow(2, trailling_bits) * 20;
int collision_count = 0;
char xDist_str[50];
#pragma omp parallel private(R, a, a2, x, r, xDist, xDist_str, trail_length) shared(collision_count, x_res, trail_length_max) num_threads(nb_threads)
{
point_init(&R);
mpz_inits(x, a2, a, xDist, NULL);
//Initialize a starting point
gmp_randstate_t r_state;
gmp_randinit_default(r_state);
/* Seed differs per thread so the walks are independent. */
gmp_randseed_ui(r_state, time(NULL) * (omp_get_thread_num() + 1));
mpz_urandomb(a, r_state, nb_bits);
double_and_add(&R, P, a, E);
trail_length = 0;
while(collision_count < nb_collisions)
{
if(is_distinguished(R, trailling_bits, &xDist))
{
/* struct_add returns nonzero when this distinguished point was
   already stored; a2 then receives the stored walk's exponent. */
if(struct_add(a2, a, xDist, xDist_str))
{
if(is_collision(x, a, a2, trailling_bits))
{
#pragma omp critical
{
collision_count++;
mpz_set(x_res, x);
}
}
}
/* Restart from a fresh random exponent after every distinguished point. */
mpz_urandomb(a, r_state, nb_bits);
double_and_add(&R, P, a, E);
trail_length = 0;
}
else
{
r=hash(R.y);
f(R, M[r], &R, E);
trail_length++;
/* Walk is probably stuck in a cycle: restart it. */
if(trail_length > trail_length_max)
{
mpz_urandomb(a, r_state, nb_bits);
double_and_add(&R, P, a, E);
trail_length = 0;
}
}
}
point_clear(&R);
mpz_clears(a, a2, x, xDist, NULL);
gmp_randclear(r_state);
}
return 0;
}
/** Free all variables used in the previous PCS run.
 *
 * Releases the global curve context, the precomputed walk points and the
 * distinguished-point storage. The A/B arrays are caller-owned and untouched.
 */
void pcs_clear()
{
    uint8_t k;
    point_clear(&P);
    point_clear(&Q);
    curve_clear(&E);
    mpz_clear(n);
    for(k = 0; k < __NB_ENSEMBLES__; k++)
    {
        mpz_clears(M[k].x, M[k].y, M[k].z, NULL);
    }
    struct_free();
}
|
target-4.c | /* { dg-do run } */
#include <stdlib.h>
#define EPS 0.000001
#define N 100000
/* Fill a1 with an alternating -1/+1 pattern (starting at -1)
   and a2 with the index sequence 0..N-1. */
void init (double *a1, double *a2)
{
  int k;
  for (k = 0; k < N; k++)
    {
      /* Even indices get -1, odd indices get +1 -- the same sequence
         a sign-flipping accumulator starting at -1 produces. */
      a1[k] = (k % 2 == 0) ? -1.0 : 1.0;
      a2[k] = k;
    }
}
/* Abort unless a and b agree element-wise to within EPS. */
void check (double *a, double *b)
{
  int k;
  for (k = 0; k < N; k++)
    {
      double diff = a[k] - b[k];
      if (diff > EPS || -diff > EPS)
	abort ();
    }
}
/* Host-only reference version of the element-wise product: p = v1 * v2. */
void vec_mult_ref (double *p, double *v1, double *v2)
{
  int k;
  for (k = 0; k < N; k++)
    {
      p[k] = v1[k] * v2[k];
    }
}
/* Offloaded element-wise product: p = v1 * v2.
   The target construct maps v1 and v2 to the device and p back to the host;
   the inner parallel for splits the N iterations among device threads.
   (The pragmas are the subject of this test case -- do not reshape them.) */
void vec_mult (double *p, double *v1, double *v2)
{
int i;
#pragma omp target map(to: v1[0:N], v2[:N]) map(from: p[0:N])
#pragma omp parallel for
for (i = 0; i < N; i++)
p[i] = v1[i] * v2[i];
}
/* Compare the offloaded product against the host reference; abort on mismatch. */
int main ()
{
  double expected[N], actual[N];
  double in1[N], in2[N];
  init (in1, in2);
  vec_mult_ref (expected, in1, in2);
  vec_mult (actual, in1, in2);
  check (expected, actual);
  return 0;
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/imperative.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/storage.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_ONEDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
// Return the OS process id of the current process (platform-specific call).
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
inline size_t current_process_id() { return getpid(); }
#endif
/*!
 * \brief IndPtr should be non-negative, in non-decreasing order, start with 0
 * and end with value equal with size of indices.
 */
struct csr_indptr_check {
  // Kernel body: i indexes rows [0, end). Writes kCSRIndPtrErr to *out when
  // indptr[i+1] is negative or decreasing, the first entry is nonzero, or the
  // last entry does not equal the number of stored indices (idx_size).
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
        (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};
/*!
 * \brief Indices should be non-negative, less than the number of columns
 * and in ascending order per row.
 */
struct csr_idx_check {
  // Kernel body: i indexes rows. Scans row i's column indices and writes
  // kCSRIdxErr to *out on the first out-of-range or non-strictly-ascending
  // index found in that row.
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i+1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 ||
          (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};
/*!
 * \brief Indices of RSPNDArray should be non-negative,
 * less than the size of first dimension and in ascending order
 */
struct rsp_idx_check {
  // Kernel body: i indexes stored rows [0, end]. Writes kRSPIdxErr to *out
  // when idx[i] is out of [0, nrows) or not strictly ascending (the i < end
  // guard keeps idx[i+1] in bounds for the last element).
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    if ((i < end && idx[i+1] <= idx[i])
        || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
 * \brief Check the validity of CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSRStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *                   otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural check: ranks and lengths of the three arrays must agree.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          // Scratch flag on the same device; initialized to kNormalErr, then
          // overwritten by the check kernels if a violation is found.
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          // Copy the device-side verdict back into err_cpu.
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
/*!
 * \brief Check the validity of RowSparseNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of RowSparseStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *                   otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // O(1) structural check: one stored index per stored row.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // Nothing further to validate for an empty row-sparse array.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        // Scratch flag on the same device; overwritten by rsp_idx_check on error.
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
          val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
          idx_shape[0] - 1, input.shape()[0]);
        // Copy the device-side verdict back into err_cpu.
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                      val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}
/*!
 * \brief Dispatch a format check to the storage-specific implementation.
 *        Dense storage needs no check; unknown storage types are fatal.
 */
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      // Dense arrays have no auxiliary structure to validate.
      break;
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
      break;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
 *         false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype) {
  if (vstorage.empty()) {
    return false;
  }
  for (const auto& s : vstorage) {
    if (s != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
* or `stype2'. Sets boolean if both found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() == stype) {
return true;
}
}
}
return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
 *         is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  // std::any_of is false on an empty range, matching the documented contract.
  return std::any_of(ndstypes.begin(), ndstypes.end(),
                     [stype](const int ndstype) { return ndstype == stype; });
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
  if (x == DispatchMode::kFCompute) {
    return "fcompute";
  }
  if (x == DispatchMode::kFComputeEx) {
    return "fcompute_ex";
  }
  if (x == DispatchMode::kFComputeFallback) {
    return "fcompute_fallback";
  }
  if (x == DispatchMode::kVariable) {
    return "variable";
  }
  if (x == DispatchMode::kUndefined) {
    return "undefined";
  }
  return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  if (x == kDefaultStorage) {
    return "default";
  }
  if (x == kCSRStorage) {
    return "csr";
  }
  if (x == kRowSparseStorage) {
    return "row_sparse";
  }
  return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  if (dev_type == Context::kCPU) {
    return "cpu";
  }
  if (dev_type == Context::kGPU) {
    return "gpu";
  }
  if (dev_type == Context::kCPUPinned) {
    return "cpu_pinned";
  }
  if (dev_type == Context::kCPUShared) {
    return "cpu_shared";
  }
  return "unknown";
}
/*! \brief look up `attr_name` in the node's attribute dict,
 *         returning `default_val` when the key is absent. */
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  // Single lookup instead of a find-then-at pair.
  const auto it = attrs.dict.find(attr_name);
  return it == attrs.dict.end() ? default_val : it->second;
}
/*! \brief get string representation of the operator stypes
 *
 * Formats a multi-line report: operator name, input/output storage types,
 * the raw parameter dict and the device type -- used in fallback warnings
 * and error messages.
 */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  os << "operator = " << attrs.op->name
     << "\ninput storage types = [";
  for (const int attr : in_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "output storage types = [";
  for (const int attr : out_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "params = {";
  for (auto kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief get string representation of the operator
 *
 * Collects the storage types of all inputs and outputs and delegates the
 * formatting to operator_stype_string(). `req` is accepted for signature
 * parity but not used in the report.
 */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  for (const NDArray& nd : inputs) {
    in_stypes.push_back(nd.storage_type());
  }
  for (const NDArray& nd : outputs) {
    out_stypes.push_back(nd.storage_type());
  }
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
}
/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  // insert().second is true only the first time this exact message is seen
  // (per thread, since the store is thread-local).
  if (log_store->insert(message).second) {
    LOG(INFO) << message;
  }
}
/*! \brief log storage fallback event
 *
 * Emits (once per distinct operator description) a verbose warning that an
 * operator fell back to dense execution. Suppressed entirely when
 * MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0.
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Env var is read once and cached for the lifetime of the process.
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
    "for execution. You're seeing this warning message because the operator above is unable "
    "to process the given ndarrays with specified storage types, context and parameter. "
    "Temporary dense ndarrays are generated in order to execute the operator. "
    "This does not affect the correctness of the programme. "
    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
    "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  LogOnce(os.str());
#if MXNET_USE_ONEDNN == 1
  // Additional oneDNN hints, deduplicated the same way.
  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
                                                  "Should only be set if "
                                                  "your model has variable input shapes, "
                                                  "as cache size may grow unbounded");
#endif
}
// Heuristic to determine the number of worker threads per GPU
// (overridable via the MXNET_GPU_WORKER_NTHREADS environment variable).
inline int GetNumThreadsPerGPU() {
  // This is resource efficient option.
  const int nthreads = dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
  return nthreads;
}
// Heuristic for the number of matching colors, which bounds how much
// parallelism the executor extracts per GPU. Never exceeds the GPU
// worker-thread count.
inline int GetExecNumMatchColor() {
  const int requested = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(requested, GetNumThreadsPerGPU());
}
/*! \brief Sum the n elements of a into an accumulator of type V,
 *         starting from `start`, parallelized with an OpenMP reduction. */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V acc = start;
  #pragma omp parallel for reduction(+:acc)
  for (int idx = 0; idx < n; ++idx) {
    acc += a[idx];
  }
  return acc;
}
/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ranges below `grainsize` are sorted directly; larger ranges are split in
 * half, the first half sorted on a freshly spawned thread and the second on
 * the current one, then merged in place.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first + len, comp);
    return;
  }
  const size_t half = len / 2;
  std::thread worker(ParallelSortHelper<RandomIt, Compare>, first, half, grainsize, comp);
  ParallelSortHelper(first + half, len - half, grainsize, comp);
  worker.join();
  std::inplace_merge(first, first + half, first + len, comp);
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const size_t len = static_cast<size_t>(std::distance(first, last));
  // At least 16K elements per grain so tiny inputs stay single-threaded.
  const size_t grainsize = std::max(len / num_threads + 5, static_cast<size_t>(1024 * 16));
  ParallelSortHelper(first, len, grainsize, comp);
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order using the
 * default < operator, delegating to the comparator-taking ParallelSort
 * overload (which parallelizes long ranges across threads).
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  using value_type = typename std::iterator_traits<RandomIt>::value_type;
  ParallelSort(first, last, num_threads, std::less<value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
 * \brief Helper functions.
 */
namespace helper {
/*!
 * \brief Helper for non-array type `T`.
 *
 * These three partial specializations let MakeUnique select the right
 * overload via SFINAE: only one of SingleObject / UnknownBound / KnownBound
 * exists for a given `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Helper for an array of unknown bound `T`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Helper for an array of known bound `T`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};
}  // namespace helper
/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr` (equivalent to C++14 std::make_unique).
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 * Note the `{}`: elements are value-initialized (zeroed for scalars).
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new U[n]{});
}
/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Constructing an array of known bound is disallowed: this overload is
 * explicitly deleted so such a call fails at compile time.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
/*!
 * \brief Look up the compute function registered for `op` under
 *        `name + "<cpu>"` or `name + "<gpu>"` depending on the context's
 *        device. Returns nullptr when the operator has no such registration;
 *        aborts on an unknown device mask.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  // Attribute maps are resolved once per FCompType/name combination.
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}
/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 *
 * Floating-point types can represent every integer only up to 2^digits
 * (digits = mantissa bits); integral types are exact up to their max().
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  return !std::is_integral<T>::value ?
             size_t(2) << (std::numeric_limits<T>::digits - 1) :
             static_cast<size_t>(std::numeric_limits<T>::max());
}
// half_t has a 10-bit mantissa plus an implicit leading bit: exact integers up to 2^11.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}
// bf16 has a 7-bit mantissa plus an implicit leading bit; NOTE(review): 2 << 14
// is 2^15, larger than the 2^8 the mantissa suggests -- confirm intended value.
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
  return size_t(2) << 14;
}
// Bit length of a: 1-based index of the highest set bit (returns 1 for a <= 1).
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int bits = 1;
  for (; a > 1; a >>= 1) {
    ++bits;
  }
  return bits;
}
// Bit length of a: 1-based index of the highest set bit (returns 1 for a <= 1).
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int bits = 1;
  for (; a > 1; a >>= 1) {
    ++bits;
  }
  return bits;
}
/*!
 * \brief Return an NDArray of all zeros.
 * \param stype Target storage type.
 * \param shape Shape of the array.
 * \param ctx   Device context to allocate on.
 * \param dtype Element data type.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed.
  return NDArray(stype, shape, ctx, true, dtype);
}
/*!
 * \brief Append a zero NDArray of the given storage type, shape, context and
 *        dtype to *vec. Dense arrays are zero-filled eagerly; sparse arrays
 *        get delayed storage allocation.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype,
                             const mxnet::TShape &shape,
                             const Context &ctx,
                             const int dtype,
                             std::vector<NDArray> *vec) {
  if (stype != kDefaultStorage) {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
    return;
  }
  // NDArray with default storage: construct in place, then zero-fill.
  vec->emplace_back(shape, ctx, false, dtype);
  vec->back() = 0;
}
/*!
 * \brief parallelize copy by OpenMP.
 *
 * Copies `size` elements from src to dst. Large copies (>= the
 * MXNET_CPU_PARALLEL_SIZE threshold, default 200000) are split across OpenMP
 * threads; smaller ones use a single memcpy. src and dst must not overlap.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Threshold is read from the environment once per process.
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= copy_block_size) {
    #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
    // Silence GCC 8+'s -Wclass-memaccess: DType may be a non-trivial class
    // type, but callers only use this for bitwise-copyable elements.
    #pragma GCC diagnostic push
    #if __GNUC__ >= 8
    #pragma GCC diagnostic ignored "-Wclass-memaccess"
    #endif
    std::memcpy(dst, src, sizeof(DType) * size);
    #pragma GCC diagnostic pop
  }
}
/*!
 * \brief parallelize add by OpenMP
 *
 * Element-wise dst[i] += src[i] for `size` elements. Large inputs (>= the
 * MXNET_CPU_PARALLEL_SIZE threshold, default 200000) are split across OpenMP
 * threads; smaller ones run serially.
 */
template<typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
  // Threshold is read from the environment once per process.
  static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= add_block_size) {
    #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  } else {
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  }
}
/*!
 * \brief Convert a legacy-convention shape to the numpy convention, in place.
 *
 * Legacy encoding: ndim == 0 means "shape completely unknown", dim size 0
 * means "dim size unknown". Numpy encoding: ndim == 0 is a scalar,
 * ndim == -1 means unknown shape, dim size 0 means an empty dimension and
 * dim size -1 means an unknown dimension. The shape may already be numpy
 * compatible (e.g. when a subgraph operator's infer-shape is invoked from
 * the whole-graph pass); the transformation is then a no-op for -1 entries.
 * \param shape shape to be converted.
 */
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() == 0) {
    // Legacy "completely unknown" becomes the ndim == -1 unknown shape.
    *shape = mxnet::TShape();
    return;
  }
  for (int axis = 0; axis < shape->ndim(); ++axis) {
    // Legacy unknown dim size (0) becomes the numpy encoding (-1).
    if ((*shape)[axis] == 0) {
      (*shape)[axis] = -1;
    }
  }
}
/*! \brief Convert every shape in the vector to the numpy convention, in place. */
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (mxnet::TShape& shape : *shapes) {
    ConvertToNumpyShape(&shape);
  }
}
/*!
 * \brief This is function is used to convert shapes returned by
 * the infer shape functions/pass to the legacy shape definition:
 * an unknown shape becomes ndim == 0 and unknown dim sizes become 0.
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    // Unknown numpy shape (ndim == -1) maps to the legacy 0-dim encoding.
    *shape = mxnet::TShape(0, -1);
    return;
  }
  for (int axis = 0; axis < shape->ndim(); ++axis) {
    // Unknown dim size (-1) maps to the legacy 0 encoding.
    if (!mxnet::dim_size_is_known(*shape, axis)) {
      (*shape)[axis] = 0;
    }
  }
}
/*! \brief Convert every shape in the vector to the legacy convention, in place. */
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (mxnet::TShape& shape : *shapes) {
    ConvertToLegacyShape(&shape);
  }
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
/*! \brief Normalize a list of axes: negative entries are shifted into
 *         [0, ndim); out-of-range entries trigger a fatal CHECK. */
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
  const int ndim = src.ndim();
  mxnet::TShape result = src;
  for (int idx = 0; idx < ndim; ++idx) {
    if (result[idx] < 0) {
      result[idx] += ndim;
    }
    CHECK(result[idx] >= 0 && result[idx] < ndim) << "axes[" << idx << "]="
                                                  << result[idx] << " exceeds the range ["
                                                  << 0 << ", " << ndim << ")";
  }
  return result;
}
// True for the floating-point dtypes (fp16/fp32/fp64).
inline bool is_float(const int dtype) {
  switch (dtype) {
    case mshadow::kFloat16:
    case mshadow::kFloat32:
    case mshadow::kFloat64:
      return true;
    default:
      return false;
  }
}
// True for the integral dtypes (u8/i8/i32/i64).
inline bool is_int(const int dtype) {
  switch (dtype) {
    case mshadow::kUint8:
    case mshadow::kInt8:
    case mshadow::kInt32:
    case mshadow::kInt64:
      return true;
    default:
      return false;
  }
}
/*
 * Return the "wider" of two mshadow dtypes.
 * Floats dominate integers; within each family the larger type wins
 * (fp64 > fp32 > fp16, i64 > i32 > {u8, i8}).  Mixing UInt8 with Int8
 * is a precondition violation (see CHECK below); callers that may mix
 * them should use np_binary_out_infer_type instead.
 */
inline int get_more_precise_type(const int type1, const int type2) {
  if (type1 == type2) return type1;
  const bool f1 = is_float(type1);
  const bool f2 = is_float(type2);
  if (f1 && f2) {
    if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) return mshadow::kFloat64;
    if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) return mshadow::kFloat32;
    return mshadow::kFloat16;
  }
  if (f1 || f2) {
    // A float always outranks an integer type.
    return f1 ? type1 : type2;
  }
  if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) return mshadow::kInt64;
  if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) return mshadow::kInt32;
  CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
          (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
      << "1 is UInt8 and 1 is Int8 should not get here";
  if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) return mshadow::kUint8;
  return mshadow::kInt8;
}
/*
 * Output dtype for a numpy-style binary op: the UInt8/Int8 mix (which
 * get_more_precise_type rejects) promotes to Int32; everything else
 * follows get_more_precise_type.
 */
inline int np_binary_out_infer_type(const int type1, const int type2) {
  const bool mixed_8bit =
      (type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
      (type1 == mshadow::kInt8 && type2 == mshadow::kUint8);
  return mixed_8bit ? mshadow::kInt32 : get_more_precise_type(type1, type2);
}
/*
 * Look up the profiler scope stored in a node's attribute dictionary
 * under "__profiler_scope__"; fall back to the storage default scope
 * when the attribute was never assigned.
 */
inline const std::string
NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) {
  const auto it = attrs.dict.find("__profiler_scope__");
  if (it != attrs.dict.end()) {
    return it->second;
  }
  return MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR;
}
// Default dtype for newly created arrays: fp64 under numpy-default
// dtype semantics, fp32 otherwise.
inline int GetDefaultDtype() {
  if (Imperative::Get()->is_np_default_dtype()) return mshadow::kFloat64;
  return mshadow::kFloat32;
}
// As above, but an explicitly requested dtype (anything != -1) wins.
inline int GetDefaultDtype(int dtype) {
  if (dtype != -1) return dtype;
  return GetDefaultDtype();
}
/*
 * Metadata describing an mshadow dtype: its printable name, the size of
 * one element in bytes, and the size of the accumulator type used for
 * reductions (defaults to the element size).
 */
struct MShadowTypeInfo {
  std::string name;  // human-readable dtype name
  int size;          // element size in bytes
  int acc_size;      // accumulator element size in bytes

  // Take `name` by value (non-const) and move it in.  The original
  // declared the parameter `const std::string`, which made the
  // std::move silently degrade to a copy.
  MShadowTypeInfo(std::string name, const int size, const int acc_size)
      : name(std::move(name)), size(size), acc_size(acc_size) {}

  MShadowTypeInfo(std::string name, const int size)
      : MShadowTypeInfo(std::move(name), size, size) {}
};
MShadowTypeInfo mshadow_type_info(const int type_flag);
/*
 * Allocate `size` bytes aligned to `alignment` and store the pointer in
 * *ptr.  Returns true on success, false on failure (*ptr is only
 * meaningful on success).  Release the memory with AlignedMemFree, not
 * plain free, so the matching deallocator is used on every platform.
 */
inline bool AlignedMemAlloc(void** ptr, size_t size, size_t alignment) {
#ifdef _MSC_VER  // #ifdef: _MSC_VER is undefined (not 0) off-Windows
  *ptr = _aligned_malloc(size, alignment);
  return *ptr != nullptr;
#else
  // posix_memalign requires alignment to be a power of two and a
  // multiple of sizeof(void*); it returns 0 on success.
  return posix_memalign(ptr, alignment, size) == 0;
#endif
}
/*
 * Release memory obtained from AlignedMemAlloc, dispatching to the
 * deallocator matching the platform allocator (_aligned_malloc on
 * MSVC, posix_memalign -> plain free elsewhere).
 */
inline void AlignedMemFree(void* ptr) {
#if _MSC_VER
  _aligned_free(ptr);
#else
  free(ptr);
#endif
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
data.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "data.h"
#include "../input/dataHamming.h"
#include "../input/dataEuclidean.h"
#include "../input/dataDistanceMatrix.h"
/*void Print(value* v) {
if (v == NULL || v->name == NULL) printf("NULL");
printf("%s", v->name);
}
*/
/*
 * Global working set for the clustering algorithms: the point array is
 * partitioned into clusters, with the k centroids stored at the tail
 * of the array.
 */
struct dataAct_t {
  value* data;         // all entries; centroids occupy the tail of the array
  int dataSize;        // total number of entries in data[]
  int centroidsIndex;  // array index: data[centroidsIndex] contains
                       // the LAST centroid.
                       // The FIRST centroid is in data[dataSize-1].
  int* clustIndexes;   // array of size k. clustIndexes[i] contains
                       // the location of the first point in cluster i.
                       // NOTE: the last element of the last
                       // cluster is in data[centroidsIndex-1].
  int centroidNum;     // number of centroids currently in the data
                       // array; always <= k
  int k;               // number of clusters
  // The first centroid lies in data[dataSize-k].
};
/*
 * Per-point assignment bookkeeping: the nearest and second-nearest
 * centroid with their distances, plus flags recording whether each
 * slot has been filled yet (-1 distances mean "unassigned").
 */
typedef struct clustExtra_t {
  int flagAssign;       // 1 once the nearest centroid has been assigned
  int clust;            // index of the nearest centroid (-1 = none)
  double clustDist;     // distance to the nearest centroid
  int secClust;         // index of the second-nearest centroid
  double secClustDist;  // distance to the second-nearest centroid
  int secFlagAssign;    // 1 once the second-nearest has been assigned
} clustExtra;
Data dataAct;
extern dataIF data;
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ClustExtra
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/*
 * Allocate and attach a fresh clustExtra record to point `point`, with
 * every field reset to the "unassigned" state.  Aborts on allocation
 * failure (the original dereferenced an unchecked malloc result).
 */
void InitClustExtra(int point) {
  value* pointVal = &dataAct.data[point];
  clustExtra* val = malloc(sizeof *val);
  if (val == NULL) {
    fprintf(stderr, "InitClustExtra: out of memory\n");
    exit(EXIT_FAILURE);
  }
  val->clust = -1;
  val->clustDist = -1;
  val->flagAssign = 0;
  val->secClust = -1;
  val->secClustDist = -1;
  /* The original assigned secFlagAssign twice (-1, then 0); only the
     final value matters. */
  val->secFlagAssign = 0;
  pointVal->clustExtra = val;
}
/*
 * Print one summary line per cluster to `output`: cluster index, point
 * count and medoid name.  (The unused loop variable j of the original
 * was removed.)
 */
void PrintClusters(FILE* output) {
  int i;
  for (i = 0; i < dataAct.k; i++) {
    int clustMin = dataAct.clustIndexes[i];
    /* The last cluster ends where the centroid block begins. */
    int clustMax = (i == dataAct.k - 1) ? dataAct.centroidsIndex
                                        : dataAct.clustIndexes[i + 1];
    value clustVal;
    GetIthCentroid(i, &clustVal);
    int size = clustMax - clustMin;
    if (size < 0)  /* guard against transiently inconsistent indexes */
      size = 0;
    fprintf(output, "CLUSTER-%d {size: %d, medoid: %s}\n", i,
            size, clustVal.name);
  }
}
/* Number of members of cluster i, counting its centroid (hence +1). */
int GetClustSize(int i) {
  int lo = dataAct.clustIndexes[i];
  int hi;
  if (i == dataAct.k - 1)
    hi = dataAct.centroidsIndex;
  else
    hi = dataAct.clustIndexes[i + 1];
  int span = hi - lo;
  return (span > 0) ? span + 1 : 1;
}
/*
 * Like PrintClusters, but additionally lists the name of every point
 * contained in each cluster.
 */
void PrintClustersComplete(FILE* output) {
  int c;
  for (c = 0; c < dataAct.k; c++) {
    int lo = dataAct.clustIndexes[c];
    int hi = (c == dataAct.k - 1) ? dataAct.centroidsIndex
                                  : dataAct.clustIndexes[c + 1];
    value medoid;
    GetIthCentroid(c, &medoid);
    int size = hi - lo;
    if (size < 0)
      size = 0;
    fprintf(output, "CLUSTER-%d {size: %d, medoid: %s}\n", c,
            size, medoid.name);
    if (lo < hi) {
      int p;
      fprintf(output, "CLUSTER-CONTENTS: {");
      for (p = lo; p < hi; p++) {
        value member;
        GetIthData(p, &member);
        fprintf(output, "%s,", member.name);
      }
      fprintf(output, "}");
    } else {
      fprintf(output, "CLUSTER-CONTENTS: (empty)");
    }
    fprintf(output, "\n");
  }
}
/*
 * Record the nearest (clust/clustDist) and second-nearest
 * (secClust/secClustDist) centroid of `point` in its clustExtra record.
 */
void AssignClustExtra(int point, int clust, double clustDist, int secClust,
                      double secClustDist) {
  value pointVal;
  GetIthData(point, &pointVal);
  clustExtra* info = pointVal.clustExtra;
  info->clust = clust;
  info->secClust = secClust;
  info->clustDist = clustDist;
  info->secClustDist = secClustDist;
}
double GetDistFirstCentr(int point) {
value pointVal;
GetIthData(point, &pointVal);
//if (pointVal.clustExtra == NULL) return -1;
return ((clustExtra*)pointVal.clustExtra)->clustDist;
}
double GetDistSecCentr(int point) {
value pointVal;
GetIthData(point, &pointVal);
//if (pointVal.clustExtra == NULL) return -1;
return ((clustExtra*)pointVal.clustExtra)->secClustDist;
}
int GetExtrasCluster(int point){
value pointVal;
GetIthData(point, &pointVal);
//if (pointVal.clustExtra == NULL) return -1;
return ((clustExtra*)pointVal.clustExtra)->clust;
}
// void UpdateClustExtra(int point,int clust,double clustDist){
//
// value* pointVal=GetIthData(point);
//
// if(pointVal == NULL || pointVal->clustExtra==NULL)
// return;
//
// clustExtra* info=pointVal->clustExtra;
//
// if(clustDist < info->secClustDist ){
//
// if(clustDist < info->clustDist){
// info->secClustDist=info->clustDist;
// info->secClust=info->clust;
// info->clust=clust;
// info->clustDist=clustDist;
// }
// else{
// info->secClustDist=clustDist;
// info->secClust=clust;
// }
// }
//}
int IsAssigned(int point) {
value pointVal;
GetIthData(point, &pointVal);
//if (pointVal.clustExtra == NULL) return -1;
if ((((clustExtra*)(pointVal.clustExtra))->flagAssign == 1) &&
(((clustExtra*)(pointVal.clustExtra))->secFlagAssign == 1)) {
return 1;
}
return 0;
}
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ClustExtra
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cluster
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/*
 * Exchange the entries data[ind1] and data[ind2].
 * The original printed a warning for out-of-range indexes but then
 * performed the out-of-bounds memcpy anyway (undefined behavior); the
 * swap is now skipped after the report.
 */
void Swap(int ind1, int ind2) {
  if (ind1 >= dataAct.dataSize || ind2 >= dataAct.dataSize) {
    printf("Swap indexes over datasize\n");
    return;
  }
  if (ind1 < 0 || ind2 < 0) {
    printf("Swap indexes under zero\n");
    return;
  }
  if (ind1 == ind2)
    return;
  value v = dataAct.data[ind1];
  dataAct.data[ind1] = dataAct.data[ind2];
  dataAct.data[ind2] = v;
}
/*
 * Return the index of the medoid of `cluster`: the member whose summed
 * distance to all other members is minimal.
 *
 * Cleanups over the original: a malloc'd avgDist array that was never
 * read, an unused GetIthCentroid call (whose argument had been damaged
 * by a '&cent' -> mojibake substitution) and a no-op `continue` at the
 * end of the inner loop were all removed; behavior is unchanged.
 */
int GetMedoid(int cluster) {
  int clustMin = dataAct.clustIndexes[cluster];
  int clustMax;
  int i, j, minDistIndex;
  double sumDist = 0, minDist;
  if (cluster == dataAct.k - 1)
    clustMax = dataAct.centroidsIndex - 1;  /* index of last member */
  else
    clustMax = dataAct.clustIndexes[cluster + 1] - 1;
  minDist = 99999999999;
  minDistIndex = 0;
  for (i = clustMin; i <= clustMax; ++i) {
    for (j = clustMin; j <= clustMax; ++j) {
      if (i == j) continue;
      sumDist += data.distance(&dataAct.data[i], &dataAct.data[j]);
    }
    if (sumDist <= minDist) {  /* new best candidate */
      minDist = sumDist;
      minDistIndex = i;
    }
    sumDist = 0;
  }
  return minDistIndex;
}
/*
 * Promote data[centrIndex] to a centroid by swapping it into the
 * centroid block at the tail of the array; returns the new centroid's
 * position, or -1 for an invalid index.  Does not preserve the cluster
 * partition (clustIndexes is left untouched).
 *
 * The original bounds check used `>` and therefore accepted
 * centrIndex == dataSize (one past the end) as well as negative
 * indexes; both are rejected now.
 */
int AddCentroid(int centrIndex) {
  if (centrIndex < 0 || centrIndex >= dataAct.dataSize) return -1;
  dataAct.centroidNum++;
  dataAct.k++;
  dataAct.centroidsIndex--;
  Swap(centrIndex, dataAct.centroidsIndex);
  return dataAct.centroidsIndex;
}
/*
 * Partition the non-centroid points into k equally sized initial
 * clusters and create a clustExtra record for every entry.
 */
void FirstAssignment() {
  int points = dataAct.dataSize - dataAct.k;
  int offset = (int)(points / (float)dataAct.k);
  int i;
  dataAct.clustIndexes = malloc(sizeof(int) * dataAct.k);
  for (i = 0; i < dataAct.k; i++)
    dataAct.clustIndexes[i] = i * offset;
  for (i = 0; i < dataAct.dataSize; ++i)
    InitClustExtra(i);
}
/*
 * Move the point at data[index] into cluster clustNum by repeatedly
 * swapping it across cluster boundaries, shifting each boundary by one
 * position as the point passes it.  Returns 0 when the point moved
 * toward cluster 0 (or was already inside the target range), 1 when it
 * moved toward the centroid block, and -1 for an invalid cluster
 * number.
 */
int AddToClust(int index, int clustNum) {
  int i = 0;
  if (index < 0)
    printf("aaaaaaaaaaaaaaaaaaaaaaaaa\n");  /* debug trace: negative index */
  /* Current cluster derived from the boundary table, not from the
     point's clustExtra record. */
  int pointCluster = GetClusterOf2(index);
  if (clustNum > dataAct.k - 1) return -1;
  /* [clustMin, clustMax] is the target cluster's index range. */
  int clustMin = dataAct.clustIndexes[clustNum];
  int clustMax = 0;
  if (clustNum == dataAct.k - 1){
    clustMax = dataAct.centroidsIndex - 1;
  }
  else
    clustMax = dataAct.clustIndexes[clustNum + 1] - 1;
  if (index > clustMax ){ /* moves left, one cluster at a time */
    for (i = pointCluster; i > clustNum; i--) {
      /* swap the point with the first element of its current cluster,
         then grow the cluster on the left by one */
      Swap(index, dataAct.clustIndexes[pointCluster]);
      dataAct.clustIndexes[pointCluster]++;
      index = dataAct.clustIndexes[pointCluster] - 1;
      pointCluster--;
    }
    return 0;
  }
  else if (index < clustMin ){ /* moves right */
    for (i = pointCluster; i < clustNum; i++) {
      /* swap with the last element of the current cluster, then grow
         the cluster on the right by one */
      Swap(index, dataAct.clustIndexes[pointCluster + 1] - 1);
      dataAct.clustIndexes[pointCluster + 1]--;
      index = dataAct.clustIndexes[pointCluster + 1];
      pointCluster++;
    }
    return 1;
  }
  else{
    /* already inside the target range: nothing to move */
    return 0;
  }
}
int GetClusterOf(int index) {
value v;
GetIthData(index, &v);
clustExtra* extra = v.clustExtra;
int ret = extra->clust;
return ret;
}
int GetClusterOf2(int index) {
int i;
for (i = 0; i < GetNoOfCluster(); ++i) {
if (i == GetNoOfCluster() - 1)
return i;
if (dataAct.clustIndexes[i] <= index &&
dataAct.clustIndexes[i + 1] > index)
return i;
}
return -1;
}
/*
 * Replace centroid number `oldCentr` with the point at data[medoid];
 * centroid slot i lives at data[centroidsIndex + i].
 */
void SwapCentroid(int medoid, int oldCentr) {
  Swap(medoid, oldCentr + dataAct.centroidsIndex);
}
/*
 * Estimate the change in total clustering cost (DJ) of replacing
 * centroid `oldCentr` with the point `centroid`, evaluated only on the
 * `sSize` points listed in `sample` instead of the whole data set.
 * Negative DJ means the swap improves the clustering.
 *
 * Fix over the original: one comparison used GetDistSecCentr(i) --
 * the loop counter -- where every other access in the same loop body
 * uses sample[i] (and the exact variant ComputeDJ uses the point
 * index); it now consistently uses the sampled point.  The unreachable
 * `return -1;` after the final return was removed.
 */
double ComputeAproximateDJ(int oldCentr, int centroid, int *sample, int sSize) {
  int i;
  double DJ = 0.0;
  value v1, v2;
  GetIthData(centroid, &v2);
  for (i = 0; i < sSize; i++) {
    GetIthData(sample[i], &v1);
    double iFromCentr = data.distance(&v1, &v2);
    if (GetClusterOf(sample[i]) == oldCentr) {
      /* Point of the replaced cluster: re-assigned to whichever is
         closer, its second centroid or the new centroid. */
      if (iFromCentr >= GetDistSecCentr(sample[i]))
        DJ += GetDistSecCentr(sample[i]) - GetDistFirstCentr(sample[i]);
      else
        DJ += iFromCentr - GetDistFirstCentr(sample[i]);
    } else if (iFromCentr < GetDistFirstCentr(sample[i])) {
      /* Point of another cluster now closer to the new centroid. */
      DJ += iFromCentr - GetDistFirstCentr(sample[i]);
    }
  }
  /* Contribution of the replaced centroid itself: distance to the
     nearest remaining centroid, capped by its distance to the new
     centroid. */
  double bestDist = 9999999999;
  GetIthCentroid(oldCentr, &v2);
  for (i = 0; i < dataAct.k; ++i) {
    if (i == oldCentr)
      continue;
    GetIthCentroid(i, &v1);
    double dist = data.distance(&v1, &v2);
    if (dist < bestDist)
      bestDist = dist;
  }
  GetIthData(centroid, &v1);
  double dist = data.distance(&v1, &v2);
  DJ += (dist >= bestDist) ? bestDist : dist;
  return DJ;
}
/*
 * Exact change in total clustering cost (DJ) of replacing centroid
 * `oldCentr` with point `centroid`, evaluated over every non-centroid
 * point.  Negative DJ means the swap improves the clustering.
 *
 * Cleanups over the original: the loop-invariant fetch of the new
 * centroid was hoisted out of the loop, and the unused bestPos
 * variable and the unreachable `return -1;` were removed.
 */
double ComputeDJ(int oldCentr, int centroid) {
  int i;
  double DJ = 0.0;
  value v1, v2;
  GetIthData(centroid, &v2);  /* invariant across the loop */
  for (i = 0; i < GetDataSize() - dataAct.k; i++) {
    GetIthData(i, &v1);
    double iFromCentr = data.distance(&v1, &v2);
    if (GetClusterOf(i) == oldCentr) {
      /* Point of the replaced cluster: re-assigned to whichever is
         closer, its second centroid or the new centroid. */
      if (iFromCentr >= GetDistSecCentr(i))
        DJ += GetDistSecCentr(i) - GetDistFirstCentr(i);
      else
        DJ += iFromCentr - GetDistFirstCentr(i);
    } else if (iFromCentr < GetDistFirstCentr(i)) {
      DJ += iFromCentr - GetDistFirstCentr(i);
    }
  }
  /* Contribution of the replaced centroid: distance to the nearest
     remaining centroid, capped by its distance to the new centroid. */
  double bestDist = 9999999999;
  GetIthCentroid(oldCentr, &v2);
  for (i = 0; i < dataAct.k; ++i) {
    if (i == oldCentr)
      continue;
    GetIthCentroid(i, &v1);
    double dist = data.distance(&v1, &v2);
    if (dist < bestDist)
      bestDist = dist;
  }
  GetIthData(centroid, &v1);
  double dist = data.distance(&v1, &v2);
  DJ += (dist >= bestDist) ? bestDist : dist;
  return DJ;
}
/*
 * Reset every point's existing clustExtra record to the unassigned
 * state (the same values InitClustExtra uses, without reallocating).
 */
void InitAssigned() {
  int i;
  for (i = 0; i < GetDataSize(); ++i) {
    clustExtra* val = dataAct.data[i].clustExtra;
    val->clust = -1;
    val->clustDist = -1;
    val->flagAssign = 0;
    val->secClust = -1;
    val->secClustDist = -1;
    /* the original wrote secFlagAssign twice (-1 then 0); keep only
       the effective value */
    val->secFlagAssign = 0;
  }
}
/* Smallest pairwise distance between two distinct centroids.
 * (The '&centr1'/'&centr2' arguments had been damaged by a mojibake
 * substitution in the source; restored here.) */
double MinDistCendroids() {
  int i, j;
  double minDist = 99999999999;
  value centr1, centr2;
  for (i = 0; i < dataAct.k; i++) {
    GetIthCentroid(i, &centr1);
    for (j = 0; j < dataAct.k; j++) {
      if (i == j) continue;
      GetIthCentroid(j, &centr2);
      double d = data.distance(&centr1, &centr2);
      if (d < minDist)
        minDist = d;
    }
  }
  return minDist;
}
/* Largest pairwise distance between two distinct centroids. */
double MaxDistCendroids() {
  int i, j;
  double maxDist = 0;
  value centr1, centr2;
  for (i = 0; i < dataAct.k; i++) {
    GetIthCentroid(i, &centr1);
    for (j = 0; j < dataAct.k; j++) {
      if (i == j) continue;
      GetIthCentroid(j, &centr2);
      double d = data.distance(&centr1, &centr2);
      if (d > maxDist)
        maxDist = d;
    }
  }
  return maxDist;
}
/*
 * Merge a candidate assignment (cluster clustNum at distance dist)
 * into the best/second-best records of point *v during an LSH pass.
 * Returns 1 when the call changed a record that was not yet fully
 * assigned, 0 otherwise.  `loop` is accepted for interface
 * compatibility but unused.
 *
 * NOTE(review): dist is an int although distances are stored as
 * doubles elsewhere (clustDist/secClustDist) -- fractional distances
 * are truncated at the call site; confirm against the callers.
 *
 * Fix over the original: when both slots were already assigned and the
 * candidate beat only the second-best distance, the original
 * overwrote the BEST slot (clust/clustDist) with the worse candidate,
 * breaking the clustDist <= secClustDist invariant; the second-best
 * slot is updated instead.
 */
int AssignLSH(value *v, int dist, int clustNum, int loop){
  clustExtra* extra = v->clustExtra;
  /* remember whether the record was already fully assigned */
  int ret = extra->flagAssign && extra->secFlagAssign;
  /* first assignment: fill the best slot */
  if (extra->flagAssign == 0) {
    extra->clust = clustNum;
    extra->clustDist = dist;
    extra->flagAssign = 1;
    return 0;
  }
  /* better than the current best: demote best to second-best */
  if (extra->clustDist > dist) {
    extra->secClust = extra->clust;
    extra->secClustDist = extra->clustDist;
    extra->clust = clustNum;
    extra->clustDist = dist;
    extra->secFlagAssign = 1;
    return !ret;
  }
  /* second slot still empty: fill it (unless it duplicates the best) */
  if (extra->secFlagAssign == 0) {
    if (extra->clust != clustNum) {
      extra->secClust = clustNum;
      extra->secClustDist = dist;
      extra->secFlagAssign = 1;
      return !ret;
    }
    return 0;
  }
  /* both slots filled: the candidate can still improve the second-best */
  if (extra->secClustDist > dist) {
    extra->secClust = clustNum;
    extra->secClustDist = dist;
  }
  return !ret;
}
/* Fill rating[] with every entry's score and id for `item`. */
void GetScoreMatrix(int item, Rating* rating) {
  int i;
  for (i = 0; i < GetDataSize(); ++i) {
    rating[i].rate = data.getScore(i, item);
    rating[i].id = data.getId(i, item);
  }
}
/* Write the (int-truncated) rating values back into the data set. */
void SetScores(int item, Rating* rating) {
  int i;
  for (i = 0; i < GetDataSize(); ++i)
    data.setScore(i, item, (int)rating[i].rate);
}
/* Remove `item` from every entry of the data set. */
void DeleteItems(int item) {
  int i;
  for (i = 0; i < GetDataSize(); ++i)
    data.deleteItem(i, item);
}
/*
 * Average distance from the point at data[index] to all members of
 * cluster `clust`, including the cluster's centroid (hence the +1 in
 * the divisor).
 */
double AvgDistToClust(int index, int clust) {
  int lo = dataAct.clustIndexes[clust];
  int hi = (clust == dataAct.k - 1) ? dataAct.centroidsIndex
                                    : dataAct.clustIndexes[clust + 1];
  value self, other;
  GetIthData(index, &self);
  double total = 0.0;
  int i;
  for (i = lo; i < hi; ++i) {
    GetIthData(i, &other);
    total += data.distance(&self, &other);
  }
  GetIthCentroid(clust, &other);
  total += data.distance(&self, &other);
  return total / (hi - lo + 1);
}
/*
 * Mean silhouette coefficient over all non-centroid points:
 * a[i] is the average distance of point i to its own cluster,
 * b[i] the smallest average distance to any other cluster, and
 * s[i] = (b-a)/max(a,b) in [-1, 1].  Returns the mean of s[i].
 * The per-cluster report (sAvg, written to `output`) is computed but
 * its printing is currently disabled.
 */
double Silhouette(FILE* output){
  double* a,*b,*s, *sAvg;
  double sTotal = 0;
  /* one slot per non-centroid point */
  a = malloc((GetDataSize()-dataAct.k)*sizeof(double));
  b = malloc((GetDataSize()-dataAct.k)*sizeof(double));
  s = malloc((GetDataSize()-dataAct.k)*sizeof(double));
  sAvg = malloc(dataAct.k*sizeof(double));
  int i;
  for(i=0;i<dataAct.k;i++){
    sAvg[i] = 0;
  }
  /* Each point only reads shared state (distances, boundaries), so the
     per-point silhouettes can be computed in parallel. */
#pragma omp parallel for
  for(i=0; i<GetDataSize()-dataAct.k; i++){
    a[i] = AvgDistToClust(i, GetClusterOf(i));
    double minAvgClusterDist = 9999999999;
    int j;
    for (j = 0; j < GetNoOfCluster(); ++j)
    {
      if ( j == GetClusterOf(i))
        continue;
      double avgClusterDist = AvgDistToClust(i, j);
      if ( avgClusterDist < minAvgClusterDist)
        minAvgClusterDist = avgClusterDist;
    }
    b[i] = minAvgClusterDist;
    /* s[i] = (b-a)/max(a,b), written as two branches */
    if(a[i] > b[i])
      s[i] = b[i]/a[i] - 1;
    else if(a[i] < b[i])
      s[i] = 1- a[i]/b[i];
    else
      s[i] = 0;
  }
  /* serial reduction, kept outside the parallel loop */
  for (i = 0; i < GetDataSize()-dataAct.k; ++i)
  {
    sTotal += s[i];
    sAvg[GetClusterOf(i)]+=s[i];
  }
  sTotal = sTotal/(GetDataSize()-dataAct.k);
  free(a);
  free(b);
  free(s);
  free(sAvg);
  return sTotal;
}
/*
 * Tear down the cluster partition while keeping the data points.
 * clustIndexes is reset to NULL after freeing so that a subsequent
 * DestroyData() -- which also frees clustIndexes -- cannot double-free.
 */
void DestroyClusters() {
  free(dataAct.clustIndexes);
  dataAct.clustIndexes = NULL;
  dataAct.k = 0;
  dataAct.centroidNum = 0;
  dataAct.centroidsIndex = dataAct.dataSize;
}
/*
 * Free every point's owned buffers (name, clustExtra, content) and the
 * top-level arrays.  Pointers are NULLed after freeing so that calling
 * DestroyClusters() and DestroyData() in either order cannot
 * double-free clustIndexes (both functions free it).
 */
void DestroyData() {
  int i;
  for (i = 0; i < GetDataSize(); ++i) {
    value v;
    GetIthData(i, &v);
    free(v.name);        /* free(NULL) is a no-op, no guard needed */
    free(v.clustExtra);
    free(v.content);
  }
  free(dataAct.data);
  dataAct.data = NULL;
  free(dataAct.clustIndexes);
  dataAct.clustIndexes = NULL;
}
/* Set the total number of entries in the data array. */
void SetDataSize(int size) { dataAct.dataSize = size; }
/*
 * Average cluster size (integer division).  Returns 0 when no clusters
 * exist; the original divided by k unconditionally, which is undefined
 * behavior for k == 0 (a stray `;;` was also removed).
 */
int GetAvgClustSize() {
  int i, sum = 0;
  if (dataAct.k <= 0)
    return 0;
  for (i = 0; i < dataAct.k; ++i)
    sum += GetClustSize(i);
  return sum / dataAct.k;
}
/* Total number of entries in the data array. */
int GetDataSize() { return dataAct.dataSize; }
/* Copy the i-th entry of the data array into *v. */
void GetIthData(int i, value* v) {
  memcpy(v, &dataAct.data[i], sizeof(value));
}
/*
 * Copy centroid number i (stored at data[centroidsIndex + i]) into *v;
 * silently does nothing for an out-of-range i.
 * The original guard `i > k` let i == k and negative indexes through,
 * reading out of bounds; the valid range is [0, k).
 */
void GetIthCentroid(int i, value* v) {
  if (i < 0 || i >= dataAct.k) return;
  memcpy(v, &dataAct.data[i + dataAct.centroidsIndex], sizeof(value));
}
/* Number of clusters k. */
int GetNoOfCluster() { return dataAct.k; }
/* Set the number of clusters k. */
void SetNoOfCluster(int k) { dataAct.k = k; }
/* Print every entry of the data set via the active print callback. */
void PrintData() {
  int i;
  for (i = 0; i < GetDataSize(); i++)
    data.print(&dataAct.data[i]);
}
/*
 * Reset the global data store and read the data set from `file` via
 * the active readData callback; afterwards every entry is a
 * non-centroid (centroidsIndex == dataSize).
 */
void parseData(FILE* file, int kLSH) {
  dataAct.k = 0;
  dataAct.centroidNum = 0;
  dataAct.data = NULL;
  dataAct.clustIndexes = NULL;
  data.readData(&dataAct.data, file, kLSH);
  dataAct.centroidsIndex = dataAct.dataSize;
}
/*
 * Parse a "key: value" configuration file into confNums[5]:
 *   [0] number_of_clusters       (no default; 0 when absent)
 *   [1] number_of_hash_functions (default 4)
 *   [2] number_of_hash_tables    (default 5)
 *   [3] clarans_set_fraction     (no default; 0 when absent)
 *   [4] clarans_iterations       (default 2)
 * Exits on a line that does not contain two whitespace-separated
 * tokens.
 *
 * Fixes over the original: getline() returns ssize_t, but the result
 * was stored in a size_t and compared against -1 (works only through
 * unsigned wraparound); an unused line counter was removed.
 */
void ParseConfig(FILE* file, int* confNums) {
  char* lineBuff = NULL;
  char secBuff1[256];
  char secBuff2[256];
  int temp[5];
  size_t lineSize = 0;
  ssize_t lineLen;
  int i;
  memset(secBuff1, '\0', sizeof(secBuff1));
  memset(secBuff2, '\0', sizeof(secBuff2));
  /* confNums doubles as a presence flag (1) until the final pass */
  for (i = 0; i < 5; i++)
    confNums[i] = 0;
  while ((lineLen = getline(&lineBuff, &lineSize, file)) != -1) {
    if (sscanf(lineBuff, "%s %s", secBuff1, secBuff2) < 2) {
      perror("Malformed file - Exiting...\n");
      exit(-1);
    }
    if (!strcmp(secBuff1, "number_of_clusters:")) {
      confNums[0] = 1;
      temp[0] = atoi(secBuff2);
    }
    if (!strcmp(secBuff1, "number_of_hash_functions:")) {
      confNums[1] = 1;
      temp[1] = atoi(secBuff2);
    }
    if (!strcmp(secBuff1, "number_of_hash_tables:")) {
      confNums[2] = 1;
      temp[2] = atoi(secBuff2);
    }
    if (!strcmp(secBuff1, "clarans_set_fraction:")) {
      confNums[3] = 1;
      temp[3] = atoi(secBuff2);
    }
    if (!strcmp(secBuff1, "clarans_iterations:")) {
      confNums[4] = 1;
      temp[4] = atoi(secBuff2);
    }
  }
  /* Replace the presence flags with the parsed value or the default. */
  for (i = 0; i < 5; i++) {
    if (confNums[i] == 1) {
      confNums[i] = temp[i];
    } else {
      if (i == 1) confNums[i] = 4;
      if (i == 2) confNums[i] = 5;
      if (i == 4) confNums[i] = 2;
    }
  }
  free(lineBuff);
}
/*
 * Return, via *vptr, a freshly malloc'd array of pointers to every
 * member of cluster `clust`; the return value is the member count, or
 * -1 for an invalid cluster number.  The caller owns and frees *vptr.
 */
int GetDataOfCluster(int clust, value*** vptr) {
  if (clust > dataAct.k)
    return -1;
  int lo = dataAct.clustIndexes[clust];
  int hi = (clust == dataAct.k - 1) ? dataAct.centroidsIndex
                                    : dataAct.clustIndexes[clust + 1];
  value** out = malloc(sizeof(value*) * (hi - lo));
  int i;
  for (i = lo; i < hi; ++i)
    out[i - lo] = &dataAct.data[i];
  *vptr = out;
  return hi - lo;
}
/*
 * Read the metric-space header line(s) of `file` ("... hamming",
 * "... vector" followed by "... euclidean", or "... matrix") and
 * install the matching callback set into the global dispatch struct
 * `data`.  Exits on read failure, a malformed header or an unknown
 * metric space.
 *
 * NOTE(review): getline() returns ssize_t but lineLen is size_t, so
 * the == -1 comparisons rely on wraparound to SIZE_MAX; they work, but
 * lineLen should be ssize_t.
 * NOTE(review): the euclidean branch installs fewer callbacks than the
 * hamming/matrix branches (no readData/readQueries/getFirstQuery...)
 * -- presumably they are assigned elsewhere for this configuration;
 * confirm with the callers.
 */
void specifyDataset(FILE* file) {
  char* lineBuff = NULL;
  char secBuff1[256];
  char secBuff2[256];
  size_t lineSize = 0;
  size_t lineLen = 0;
  memset(secBuff1, '\0', sizeof(secBuff1));
  memset(secBuff2, '\0', sizeof(secBuff2));
  /* first header line: "<key> <metric-space>" */
  if ((lineLen = getline(&lineBuff, &lineSize, file)) == -1) {
    free(lineBuff);
    lineBuff = NULL;
    exit(-1);
  }
  if (sscanf(lineBuff, "%s %s", secBuff1, secBuff2) < 2) {
    perror("Malformed file - Exiting...\n");
    exit(-1);
  }
  if (strcmp(secBuff2, "hamming") == 0) {
    data.distance = Hamming;
    data.inRange = InRangeHamming;
    data.print = PrintDataHamming;
    data.readData = ReadDataHamming;
    data.readQueries = ReadQueriesHamming;
    data.G = GHamming;
    data.getFirstQuery = GetFirstQueryHamming;
    data.getNextQuery = GetNextQueryHamming;
    data.initStruct = InitStructHamming;
    data.destroyStruct = DestroyStructHamming;
    data.destroyInput = DestroyInputHamming;
    data.destroyValue = DestroyValueHamming;
  } else if (strcmp(secBuff2, "vector") == 0) {
    /* vector spaces carry a second header line naming the metric */
    memset(secBuff1, '\0', sizeof(secBuff1));
    memset(secBuff2, '\0', sizeof(secBuff2));
    if ((lineLen = getline(&lineBuff, &lineSize, file)) == -1) {
      /* something went wrong */
      free(lineBuff);
      lineBuff = NULL;
      exit(-1);
    }
    if (sscanf(lineBuff, "%s %s", secBuff1, secBuff2) < 2) {
      perror("Malformed file - Exiting...\n");
      exit(-1);
    }
    /* NOTE(review): any metric other than "euclidean" falls through
       silently, leaving `data` partially configured. */
    if (strcmp(secBuff2, "euclidean") == 0) {
      data.distance = Euclidean;
      data.inRange = InRangeEuclidean;
      data.print = PrintDataEuclidean;
      data.G = FEuclidean;
      data.initStruct = InitStructEuclidean;
      data.destroyStruct = DestroyStructEuclidean;
      data.destroyInput = DestroyInputEuclidean;
      data.destroyValue = DestroyValueEuclidean;
    }
  } else if (strcmp(secBuff2, "matrix") == 0) {
    data.distance = DistanceMatrixDistance;
    data.inRange = InRangeDistanceMatrix;
    data.print = printEverything;
    data.readData = ReadDataDistanceMatrix;
    data.readQueries = ReadQueriesDistanceMatrix;
    data.G = GDistanceMatrix;
    data.getFirst = GetFirstDistanceMatrix;
    data.getNext = GetNextDistanceMatrix;
    data.getFirstQuery = GetFirstQueryDistanceMatrix;
    data.getNextQuery = GetNextQueryDistanceMatrix;
    data.initStruct = InitStructDistanceMatrix;
    data.bruteForce = BruteForceANNDistanceMatrix;
    data.destroyStruct = DestroyStructDistanceMatrix;
    data.destroyInput = DestroyInputDistanceMatrix;
    data.destroyValue = DestroyValueDistanceMatrix;
  } else {
    printf(
        "\nWrong metric space\nIt is \"%s\" instead of \"hamming\", "
        "\"vector\" or \"matrix\"\n\n",
        secBuff2);
    free(lineBuff);
    exit(-1);
  }
  free(lineBuff);
}
|
LCC04Traversal.h | /**
* @file LCC04Traversal.h
* @author C.Menges, based on tchipevn (original source:
* ls1-mardyn/src/particleContainer/LinkedCellTraversals/C04CellPairTraversal.h)
* @date 15.06.2019
*/
#pragma once
#include "autopas/containers/cellPairTraversals/C08BasedTraversal.h"
#include "autopas/containers/linkedCells/traversals/LCC08CellHandler.h"
#include "autopas/containers/linkedCells/traversals/LCTraversalInterface.h"
#include "autopas/pairwiseFunctors/CellFunctor.h"
#include "autopas/utils/ArrayUtils.h"
#include "autopas/utils/ThreeDimensionalMapping.h"
#include "autopas/utils/WrapOpenMP.h"
namespace autopas {
/**
 * This class provides the c04 traversal.
 *
 * The traversal uses the c04 base step performed on every single cell. Since
 * these steps overlap a domain coloring with four colors is applied.
 *
 * @tparam ParticleCell the type of cells
 * @tparam PairwiseFunctor The functor that defines the interaction of two particles.
 * @tparam dataLayout The data layout option (the template actually takes
 * DataLayoutOption::Value, not a useSoA flag as previously documented).
 * @tparam useNewton3
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
class LCC04Traversal : public C08BasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>,
                       public LCTraversalInterface<ParticleCell> {
 public:
  /**
   * Constructor of the c04 traversal.
   * @param dims The dimensions of the cellblock, i.e. the number of cells in x,
   * y and z direction.
   * @param pairwiseFunctor The functor that defines the interaction of two particles.
   * @param interactionLength Interaction length.
   * @param cellLength cell length.
   */
  LCC04Traversal(const std::array<unsigned long, 3> &dims, PairwiseFunctor *pairwiseFunctor,
                 const double interactionLength, const std::array<double, 3> &cellLength)
      : C08BasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>(dims, pairwiseFunctor,
                                                                                interactionLength, cellLength),
        _cellOffsets32Pack(computeOffsets32Pack()),
        _cellHandler(pairwiseFunctor, this->_cellsPerDimension, interactionLength, cellLength, this->_overlap),
        _end(utils::ArrayMath::subScalar(utils::ArrayUtils::static_cast_array<long>(this->_cellsPerDimension), 1l)) {}
  void traverseParticlePairs() override;
  [[nodiscard]] TraversalOption getTraversalType() const override { return TraversalOption::lc_c04; }
  [[nodiscard]] DataLayoutOption getDataLayout() const override { return dataLayout; }
  [[nodiscard]] bool getUseNewton3() const override { return useNewton3; }
  /**
   * C04 traversals are usable, if cellSizeFactor >= 1.0 and there are more
   * than 3 cells (i.e. at least 4) in each dimension, matching the
   * `minDim > 3` check below.
   * @return information about applicability
   */
  [[nodiscard]] bool isApplicable() const override {
    if (dataLayout == DataLayoutOption::cuda) {
      return false;
    }
    // The cell size cannot be smaller than the cutoff, if OpenMP is used.
    // Also see: https://github.com/AutoPas/AutoPas/issues/464
    const double minLength = *std::min_element(this->_cellLength.cbegin(), this->_cellLength.cend());
    const unsigned long minDim = *std::min_element(this->_cellsPerDimension.cbegin(), this->_cellsPerDimension.cend());
    return minLength >= this->_interactionLength and minDim > 3;
  }
 private:
  // Traverses all blocks belonging to one of the four colors.
  void traverseSingleColor(std::vector<ParticleCell> &cells, int color);
  // Applies the c08 base step to the 32-cell pack anchored at base3DIndex.
  void processBasePack32(std::vector<ParticleCell> &cells, const std::array<long, 3> &base3DIndex);
  // Builds the 32 cell offsets that make up one c04 base pack.
  constexpr auto computeOffsets32Pack() const;
  // Parity class (0..7) of the block anchored at (x, y, z); the +24
  // keeps the dividend non-negative for the small negative anchors used.
  [[nodiscard]] constexpr long parity(long x, long y, long z) const { return (x + y + z + 24) % 8; }
  std::array<std::array<long, 3>, 32> _cellOffsets32Pack;
  LCC08CellHandler<ParticleCell, PairwiseFunctor, dataLayout, useNewton3> _cellHandler;
  // Last valid cell index (cells per dimension - 1) in each dimension.
  const std::array<long, 3> _end;
};
/**
 * Computes the 32 cell offsets that make up one c04 base pack
 * (four center cells on the bottom and top layers, twelve non-corner
 * cells on each of the two middle layers).
 *
 * @tparam ParticleCell
 * @tparam PairwiseFunctor
 * @tparam dataLayout
 * @tparam useNewton3
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
constexpr auto LCC04Traversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::computeOffsets32Pack() const {
  std::array<std::array<long, 3>, 32> cellOffsets32Pack = {};
  unsigned int i = 0;
  // Bottom cap (z == 0): the 2x2 center of the layer.
  for (long x = 1l; x <= 2l; ++x) {
    for (long y = 1l; y <= 2l; ++y) {
      cellOffsets32Pack[i++] = {x, y, 0l};
    }
  }
  // Middle layers (z == 1, 2): the full 4x4 layer minus its four corners.
  for (long z = 1l; z < 3l; ++z) {
    for (long y = 0l; y < 4l; ++y) {
      for (long x = 0l; x < 4l; ++x) {
        const bool isCorner = (x == 0l or x == 3l) and (y == 0l or y == 3l);
        if (isCorner) {
          continue;
        }
        cellOffsets32Pack[i++] = {x, y, z};
      }
    }
  }
  // Top cap (z == 3): again the 2x2 center.
  for (long x = 1l; x <= 2l; ++x) {
    for (long y = 1l; y <= 2l; ++y) {
      cellOffsets32Pack[i++] = {x, y, 3l};
    }
  }
  /// @todo C++20: mark as unlikely
  if (i != 32) {
    utils::ExceptionHandler::exception("Internal error: Wrong number of offsets (expected: 32, actual: {})", i);
  }
  return cellOffsets32Pack;
}
/**
* Goes through the cells aggregated by one color and processes the particles in each cell that is part of the
* aggregation by using the barriers saved in _cellOffset32Pack.
*
* @tparam ParticleCell
* @tparam PairwiseFunctor
* @tparam dataLayout
* @tparam useNewton3
* @param cells
* @param base3DIndex
*/
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
void LCC04Traversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::processBasePack32(
    std::vector<ParticleCell> &cells, const std::array<long, 3> &base3DIndex) {
  using utils::ThreeDimensionalMapping::threeToOneD;
  const std::array<long, 3> signedDims = utils::ArrayUtils::static_cast_array<long>(this->_cellsPerDimension);
  for (const auto &offset : _cellOffsets32Pack) {
    // Shift the pack's anchor by this offset and clip against the domain.
    std::array<long, 3> cellIndex3D{};
    bool inside = true;
    for (int d = 0; d < 3; ++d) {
      cellIndex3D[d] = base3DIndex[d] + offset[d];
      inside = inside and cellIndex3D[d] >= 0l and cellIndex3D[d] < _end[d];
    }
    if (not inside) {
      continue;
    }
    const unsigned long flatIndex = threeToOneD(cellIndex3D, signedDims);
    _cellHandler.processBaseCell(cells, flatIndex);
  }
}
/**
 * Traverses all four colors in sequence inside one parallel region,
 * with a barrier between consecutive colors so that overlapping base
 * steps of different colors never run concurrently.
 * (The per-color search over the two interleaved cartesian grids is
 * done in traverseSingleColor.)
 *
 * @tparam ParticleCell
 * @tparam PairwiseFunctor
 * @tparam dataLayout
 * @tparam useNewton3
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
void LCC04Traversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::traverseParticlePairs() {
  auto &cells = *(this->_cells);
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel
#endif
  {
    // All threads sweep the four colors together; the barrier between
    // colors prevents overlapping base steps of different colors from
    // running concurrently.
    for (int color = 0; color < 4; ++color) {
      traverseSingleColor(cells, color);
#if defined(AUTOPAS_OPENMP)
      if (color < 3) {
        // No explicit barrier after the last color: the implicit
        // barrier at the end of the parallel region covers it.
#pragma omp barrier
      }
#endif
    }
  }  // close parallel region
}
/**
 * Traverses all base packs belonging to the given color.  The packs of
 * colors 0/2 and 1/3 are anchored on two interleaved cartesian grids
 * (together a body-centered cubic lattice); the parity of an anchor
 * selects which of the four colors it carries.
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
void LCC04Traversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::traverseSingleColor(
    std::vector<ParticleCell> &cells, int color) {
  // we need to traverse one body-centered cubic (BCC) grid, which consists of two cartesian grids
  // colors 0 and 2 form one cartesian grid
  // colors 1 and 3 form another cartesian grid, whose origin is shifted by (2,2,2)
  // determine a starting point of one of the grids
  std::array<long, 3> startOfThisColor{};
  switch (color % 2) {
    case 0:
      // colours 0 and 2
      startOfThisColor = {-2l, -2l, -2l};
      break;
    case 1:
      // colours 1 and 3
      startOfThisColor = {0l, 0l, 0l};
      break;
  }
  // calculate whether the calculated starting point is part of the color
  long correctParity = parity(startOfThisColor[0], startOfThisColor[1], startOfThisColor[2]);
  if (color >= 2) {
    // colors 2 and 3 are the second parity class of their grid
    correctParity += 4;
  }
  // to fix intel64 icpc compiler complaints about perfectly nested loop (tested with version 19.0.4.20190416).
  const long startX = startOfThisColor[0], endX = _end[0];
  const long startY = startOfThisColor[1], endY = _end[1];
  const long startZ = startOfThisColor[2], endZ = _end[2];
  // first cartesian grid
  // grids are interlinked: one grid fills the gaps in the other grid
#if defined(AUTOPAS_OPENMP)
  // packs of one color are independent, so they are distributed
  // dynamically among threads; nowait -- the caller places the barrier
  // between colors
#pragma omp for schedule(dynamic, 1) collapse(3) nowait
#endif
  for (long z = startZ; z < endZ; z += 4) {
    for (long y = startY; y < endY; y += 4) {
      for (long x = startX; x < endX; x += 4) {
        // anchors are spaced 4 apart; only those with the matching
        // parity belong to this color
        const long par = parity(x, y, z);
        if (par != correctParity) {
          continue;
        }
        const std::array<long, 3> base3DIndex = {x, y, z};
        processBasePack32(cells, base3DIndex);
      }
    }
  }
}
} // namespace autopas
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
/*
  CompareImages(): compute the distortion between `image' and
  `reconstruct_image' with the requested metric, then build and return a
  difference image in which differing pixels are painted with the
  highlight color, matching pixels with the lowlight color, and
  masked-out pixels with the masklight color.  Returns NULL on failure
  (details in `exception').
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Evaluate the numeric metric first; bail out before building any
    intermediate images if it cannot be computed.
  */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /*
    The result is sized to the larger of the two images in each
    dimension; the source clone is extended to that geometry.
  */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /*
    Defaults below may be overridden via the "compare:*-color" image
    artifacts.
  */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register Quantum
      *magick_restrict r;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      register ssize_t
        i;

      /*
        Pixels excluded by either read mask are painted with the
        masklight color and take no part in the comparison.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Alpha is compared raw; other channels are compared with alpha
          premultiplied.  One channel past the fuzz threshold marks the
          whole pixel as different.
        */
        if (channel == AlphaPixelChannel)
          distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        if ((distance*distance) > fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Compose the highlight overlay onto the extended clone, then drop
    alpha from the final difference image.
  */
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  (void) SetImageAlphaChannel(difference_image,OffAlphaChannel,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion(): count, per channel, the pixels whose squared
  channel difference exceeds the fuzz threshold; the
  CompositePixelChannel slot counts pixels that differ in any channel.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickBooleanType
        difference;

      register ssize_t
        i;

      /*
        NOTE(review): this metric skips pixels by GetPixelWriteMask() on
        the source image only, while the sibling metrics test
        GetPixelReadMask() on both images -- confirm this asymmetry is
        intentional.
      */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Alpha is compared raw; other channels are compared with alpha
          premultiplied.
        */
        if (channel == AlphaPixelChannel)
          distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        if ((distance*distance) > fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's tallies into the shared totals, serialized. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion(): accumulate, per channel, the mean squared
  difference between the two images in normalized [0,1] units; the
  CompositePixelChannel result is the root of the mean over the
  updated channels.

  Fix: the NULL check on `q' cast to non-const (Quantum *), inconsistent
  with `q's declaration and with every sibling metric; it now uses
  (const Quantum *).
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* Pixels excluded by either read mask do not contribute. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Alpha is compared raw; other channels with alpha
          premultiplied; both scaled to [0,1] before squaring.
        */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's sums into the shared totals, serialized. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by pixel count; safe even when every pixel was masked. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion(): accumulate, per channel, the mean
  absolute difference between the two images in normalized [0,1]
  units; the CompositePixelChannel result is the mean over the
  updated channels.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* Pixels excluded by either read mask do not contribute. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha raw, other channels alpha-premultiplied. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=QuantumScale*fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's sums into the shared totals, serialized. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by pixel count; safe even when every pixel was masked. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetMeanErrorPerPixel(): accumulate the per-channel absolute error into
  `distortion' and store the mean error per pixel, the normalized mean
  (squared) error, and the normalized maximum error in image->error.

  Fix: the final statistics divided by `area' directly, producing
  NaN/inf when every pixel is masked (area == 0); they now use
  PerceptibleReciprocal() like the sibling metrics.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* Pixels excluded by either read mask do not contribute. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha raw, other channels alpha-premultiplied. */
        if (channel == AlphaPixelChannel)
          distance=fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        /* Note: `area' counts channel samples here, not pixels. */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Guard against area == 0 (fully masked images). */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=QuantumScale*QuantumScale*area*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion(): accumulate, per channel, the mean squared
  difference between the two images in normalized [0,1] units; the
  CompositePixelChannel result is the mean over the updated channels.
  Shared by the PSNR and RMSE metrics.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* Pixels excluded by either read mask do not contribute. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha raw, other channels alpha-premultiplied. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's sums into the shared totals, serialized. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by pixel count; safe even when every pixel was masked. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion(): compute, per channel, the
  normalized cross correlation between the two images --
  covariance / (sigma_image * sigma_reconstruct) -- using the channel
  statistics of each image.  Two serial passes are made: the first
  counts the unmasked pixels (the averaging area), the second
  accumulates the covariance.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* Release whichever of the two allocations succeeded. */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    Pass 1: count pixels that are not excluded by either read mask.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  area=PerceptibleReciprocal(area);
  /*
    Pass 2: accumulate the per-channel covariance of the mean-centered
    samples, scaled by 1/area.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /* Images without an alpha trait are treated as fully opaque. */
      Sa=QuantumScale*(image->alpha_trait != UndefinedPixelTrait ?
        GetPixelAlpha(image,p) : OpaqueAlpha);
      Da=QuantumScale*(reconstruct_image->alpha_trait != UndefinedPixelTrait ?
        GetPixelAlpha(reconstruct_image,q) : OpaqueAlpha);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Honor a user-requested cancel from the progress monitor. */
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  /* Composite value is the RMS of the per-channel correlations. */
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion(): record, per channel, the maximum
  normalized absolute difference seen over all pixels; the
  CompositePixelChannel slot holds the maximum over every channel.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* Pixels excluded by either read mask do not contribute. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha raw, other channels alpha-premultiplied. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=QuantumScale*fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's maxima into the shared maxima, serialized. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  Express each channel's mean squared distortion as a peak
  signal-to-noise ratio in decibels.  A (near-)zero MSE means the
  channel is identical in both images and maps to infinity.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    j;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (j=0; j <= MaxPixelChannels; j++)
  {
    if (fabs(distortion[j]) < MagickEpsilon)
      {
        distortion[j]=INFINITY;
        continue;
      }
    distortion[j]=20.0*MagickLog10(1.0/sqrt(distortion[j]));
  }
  return(status);
}
/*
  GetPerceptualHashDistortion(): compare the perceptual hashes of the
  two images per channel; each channel's distortion is the accumulated
  difference of the image-moment hashes across all colorspaces, and
  the CompositePixelChannel slot sums every channel's difference.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  /* "phash:normalize" artifact must be present AND true to normalize. */
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    register ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      register ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        /*
          NOTE(review): the normalized branch assigns (=) rather than
          accumulates (+=), so only the last colorspace/moment term
          survives -- confirm this is intentional.
        */
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          difference=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    /* Per-channel slot is private to this iteration; only the shared
       composite total needs the critical section. */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    RMSE: the square root of the per-channel mean-squared error.
  */
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,
    exception);
  i=0;
  while (i <= MaxPixelChannels)
  {
    distortion[i]=sqrt(distortion[i]);
    i++;
  }
  return(status);
}
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius 5.0
#define SSIMSigma 1.5
#define SSIMBlocksize 8
#define SSIMK1 0.01
#define SSIMK2 0.03
#define SSIML 1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute structural similarity index @
    https://en.wikipedia.org/wiki/Structural_similarity.  Each pixel is
    scored over a Gaussian-weighted window; the window radius/sigma and
    the stabilization constants c1/c2 may be overridden via artifacts.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a band of virtual pixels tall/wide enough for the kernel
      window centered on each pixel of this row.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      register const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      register double
        *k;

      ssize_t
        v;

      /*
        Clear the per-window accumulators.  (A redundant second clear of
        x_pixel_sigma_squared, sized with sizeof(y_pixel_sigma_squared),
        was removed here -- a copy-paste artifact; y_pixel_sigma_squared
        is cleared below.)
      */
      (void) ResetMagickMemory(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) ResetMagickMemory(x_pixel_sigma_squared,0,
        sizeof(x_pixel_sigma_squared));
      (void) ResetMagickMemory(xy_sigma,0,sizeof(xy_sigma));
      (void) ResetMagickMemory(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) ResetMagickMemory(y_pixel_sigma_squared,0,
        sizeof(y_pixel_sigma_squared));
      k=kernel_info->values;
      reference=p;
      target=q;
      /*
        Accumulate Gaussian-weighted window statistics per channel: means
        (mu), second moments (sigma_squared) and the cross moment.
      */
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        register ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          SSIM(x,y) = ((2*mu_x*mu_y+c1)*(2*sigma_xy+c2)) /
                      ((mu_x^2+mu_y^2+c1)*(sigma_x^2+sigma_y^2+c2)).
        */
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
          (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    for (i=0; i <= MaxPixelChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  /*
    Average the accumulated SSIM over the image area (and, for the
    composite channel, over the number of channels).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=((double) columns*rows);
  }
  distortion[CompositePixelChannel]/=((double) columns*rows);
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Structural dissimilarity is derived from SSIM: DSSIM = (1-SSIM)/2.
  */
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.5*(1.0-distortion[i]);
  return(status);
}
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *metrics;

  MagickBooleanType
    status;

  size_t
    length;

  /*
    Measure the requested distortion metric between image and
    reconstruct_image; the composite value is returned in *distortion and
    is also recorded as the "distortion" image property.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Allocate a zero-filled vector: one slot per pixel channel plus the
    composite channel.
  */
  length=MaxPixelChannels+1;
  metrics=(double *) AcquireQuantumMemory(length,sizeof(*metrics));
  if (metrics == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(metrics,0,length*sizeof(*metrics));
  switch (metric)
  {
    case AbsoluteErrorMetric:
      status=GetAbsoluteDistortion(image,reconstruct_image,metrics,exception);
      break;
    case FuzzErrorMetric:
      status=GetFuzzDistortion(image,reconstruct_image,metrics,exception);
      break;
    case MeanAbsoluteErrorMetric:
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    case MeanErrorPerPixelErrorMetric:
      status=GetMeanErrorPerPixel(image,reconstruct_image,metrics,exception);
      break;
    case MeanSquaredErrorMetric:
      status=GetMeanSquaredDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    case PeakAbsoluteErrorMetric:
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    case PeakSignalToNoiseRatioErrorMetric:
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,metrics,
        exception);
      break;
    case PerceptualHashErrorMetric:
      status=GetPerceptualHashDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    case RootMeanSquaredErrorMetric:
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    case StructuralSimilarityErrorMetric:
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        metrics,exception);
      break;
    case StructuralDissimilarityErrorMetric:
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        metrics,exception);
      break;
    case NormalizedCrossCorrelationErrorMetric:
    default:
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        metrics,exception);
      break;
  }
  *distortion=metrics[CompositePixelChannel];
  metrics=(double *) RelinquishMagickMemory(metrics);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  /*
    Return a newly allocated vector of per-channel distortions (one slot
    per pixel channel plus the composite channel), or NULL on failure.
    The caller owns the returned memory.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Bug fix: this case previously called GetRootMeanSquaredDistortion
        (copy-paste error); dispatch to the perceptual-hash metric, as
        GetImageDistortion() does.
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compare the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Scan over the larger of the two geometries so any size mismatch is
    compared against virtual (edge) pixels.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /*
        Pixels excluded by the write mask do not participate.
      */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        /*
          Any channel differing by >= MagickEpsilon means the images are
          not equal; the early exit cascades outward through the i/x/y
          sentinel checks below.
        */
        if (distance >= MagickEpsilon)
          break;
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    If the row loop ran to completion, no differing pixel was found.
  */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Accumulate per-channel color differences and record the mean, the
    normalized mean, and the normalized maximum error in image->error.
    Returns MagickTrue when the images match exactly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /*
        Masked pixels are excluded from the statistics.
      */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Bug fix: guard against division by zero when every pixel is masked or
    no channel participates (area == 0); the errors are zero in that case.
  */
  if (area <= 0.0)
    area=1.0;
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImageImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  /*
    Crop the window of image at (x_offset,y_offset) with the reference
    geometry and return its distortion against the reference (0.0 when
    the crop or the measurement fails).
  */
  double
    distortion;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  crop_image=CropImage(image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(crop_image,reference,metric,&distortion,exception);
  crop_image=DestroyImage(crop_image);
  return(status == MagickFalse ? 0.0 : distortion);
}
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /*
    The similarity image has one pixel per candidate placement of the
    reference within the image.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    /*
      Early exit: once any thread finds a match at or below the
      threshold, the remaining rows are skipped.
    */
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      /*
        Distortion of the reference against the image window at (x,y).
      */
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /*
        Track the best (lowest) distortion and its offset.
      */
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      if (GetPixelWriteMask(similarity_image,q) <= (QuantumRange/2))
        {
          SetPixelBackgoundColor(similarity_image,q);
          q+=GetPixelChannels(similarity_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Encode similarity as intensity: an exact match renders white.
        */
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  (void) SetImageAlphaChannel(similarity_image,OffAlphaChannel,exception);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
GB_unaryop__one_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__one_uint32_uint32
// op(A') function: GB_tran__one_uint32_uint32
// C type: uint32_t
// A type: uint32_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CASTING(z, aij) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ONE operator entrywise: Cx [p] = 1 for all anz entries.
// The operator-specific behavior is injected via the GB_* macros above;
// this file is auto-generated, so the structure is shared by all unary ops.
GrB_Info GB_unop__one_uint32_uint32
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = op (cast (Ax [p])) ; for ONE this is simply Cx [p] = 1
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply the ONE operator.
// The generic transpose kernel is textually included below; the
// operator-specific behavior again comes from the GB_* macros above.
GrB_Info GB_tran__one_uint32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts workspace
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // slice boundaries for parallelism
    int naslice                         // number of slices of A
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
applyedges.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#pragma once
#include <xmmintrin.h>
#include "GMDP/utils/bitvector.h"
// Apply the user callback 'op' in place to every stored edge value of a
// partitioned compressed sparse structure.  For each nonzero A(i,j) it calls
//   op(&A(i,j), vpvalue1[j_global], vpvalue2[i], vsp)
// where j_global is the global column id taken from col_indices.
// Partitions are processed independently in parallel.
template <typename Ta, typename Tvp>
void my_applyedges(int* row_inds, int* col_ptrs, int* col_indices, Ta* vals,
                   int num_partitions, int* row_pointers, int* col_starts,
                   int* edge_pointers, Tvp * vpvalue1, int * vpbit_vector1,
                   Tvp * vpvalue2, int * vpbit_vector2,
                   int m, int n, int* nnz,
                   void (*op)(Ta*, const Tvp&, const Tvp&, void*), void* vsp) {
  // Dynamic schedule: partitions can have very different column counts.
#pragma omp parallel for schedule(dynamic, 1)
  for (int p = 0; p < num_partitions; p++) {
    // For each column
    const int* column_offset = col_indices + col_starts[p];
    const int* partitioned_row_offset = row_inds + edge_pointers[p];
    Ta* partitioned_val_offset = vals + edge_pointers[p];
    const int* col_ptrs_cur = col_ptrs + col_starts[p];
    for (int j = 0; j < (col_starts[p + 1] - col_starts[p]) - 1; j++) {
      int col_index = col_indices[col_starts[p] + j];
      {
        //assert(get_bitvector(col_index, vpbit_vector1));
        //Tvp Xval = vpvalue1[col_index];
        // Prefetch the source-vertex value needed ~4 columns ahead.
        // NOTE(review): column_offset[j + 4] reads past the end of this
        // partition's column-index range on the last few iterations --
        // confirm the allocator pads col_indices, otherwise this is an
        // out-of-bounds read.
        _mm_prefetch((char*)(vpvalue1 + column_offset[j + 4]), _MM_HINT_T0);
        int nz_idx = col_ptrs_cur[j];
        // Walk all nonzeros of this column and update them in place.
        for (; nz_idx < col_ptrs_cur[j + 1]; nz_idx++) {
          int row_ind = partitioned_row_offset[nz_idx];
          assert(get_bitvector(row_ind, vpbit_vector2));
          //Tvp Yval = vpvalue2[row_ind];
          //Ta Aval = partitioned_val_offset[nz_idx];
          //op(&Aval, Xval, Yval, vsp);
          //op(&partitioned_val_offset[nz_idx], Xval, vpvalue2[row_ind], vsp);
          op(&partitioned_val_offset[nz_idx], vpvalue1[col_index], vpvalue2[row_ind], vsp);
          //partitioned_val_offset[nz_idx] = Aval;
        }
      }
    }
  }
  // Per-partition nnz accumulation is currently disabled.
  for (int p = 0; p < num_partitions; p++) {
    // nnz += new_nnz[p];
  }
  // NOTE(review): reports the dense size m*n rather than the actual number
  // of stored edges -- verify callers only use *nnz as an upper bound.
  *nnz = m * n;
}
// Thin wrapper: unpack a DCSC tile and the two vertex-property segments,
// then hand everything to my_applyedges.  Empty tiles are skipped.
template <typename Ta, typename Tvp>
void apply_edges(DCSCTile<Ta>* tile, const DenseSegment<Tvp> * segmentvp1,
                 const DenseSegment<Tvp> * segmentvp2,
                 void (*fp)(Ta*, const Tvp&, const Tvp&, void*), void* vsp) {
  if (tile->isEmpty()) {
    return;  // nothing stored, nothing to apply
  }
  int nnz;  // receives m*n from my_applyedges; not used further here
  my_applyedges(tile->row_inds, tile->col_ptrs, tile->col_indices, tile->vals,
                tile->num_partitions, tile->row_pointers, tile->col_starts,
                tile->edge_pointers,
                segmentvp1->properties->value, segmentvp1->properties->bit_vector,
                segmentvp2->properties->value, segmentvp2->properties->bit_vector,
                tile->m, tile->n, &nnz,
                fp, vsp);
}
|
complex_to_complex_2d.h | #ifndef SCITBX_FFTPACK_COMPLEX_TO_COMPLEX_2D_H
#define SCITBX_FFTPACK_COMPLEX_TO_COMPLEX_2D_H
#include <scitbx/fftpack/complex_to_complex.h>
#include <scitbx/error.h>
#include <omptbx/omp_or_stubs.h>
//#define SCITBX_FFTPACK_COMPLEX_TO_COMPLEX_2D_NO_PRAGMA_OMP
namespace scitbx { namespace fftpack {
  //! 2-dimensional complex-to-complex Fast Fourier Transformation.
  //! Built from two 1-D transforms (one per dimension); the two passes
  //! over the map are parallelized with OpenMP unless
  //! SCITBX_FFTPACK_COMPLEX_TO_COMPLEX_2D_NO_PRAGMA_OMP is defined.
  template <typename RealType,
            typename ComplexType = std::complex<RealType> >
  class complex_to_complex_2d
  {
    public:
#ifndef DOXYGEN_SHOULD_SKIP_THIS
      typedef RealType real_type;
      typedef ComplexType complex_type;
#endif // DOXYGEN_SHOULD_SKIP_THIS

      //! Default constructor.
      complex_to_complex_2d() {}

      //! Initialization for transforms of lengths n.
      /*! See also: Constructor of complex_to_complex.
       */
      complex_to_complex_2d(const af::int2& n);

      //! Initialization for transforms of lengths n0, n1
      /*! See also: Constructor of complex_to_complex.
       */
      complex_to_complex_2d(std::size_t n0, std::size_t n1);

      //! Access the n (or n0, n1) that was passed to the constructor.
      af::int2 n() const
      {
        return af::int2(fft1d_[0].n(), fft1d_[1].n() );
      }

      //! In-place "forward" Fourier transformation.
      /*! See also: complex_to_complex
       */
      template <typename MapType>
      void forward(
        MapType map,
        real_type* scratch=0)
      {
        transform(select_sign<forward_tag>(), map, scratch);
      }

      //! In-place "backward" Fourier transformation.
      /*! See also: complex_to_complex
       */
      template <typename MapType>
      void backward(
        MapType map,
        real_type* scratch=0)
      {
        transform(select_sign<backward_tag>(), map, scratch);
      }

    protected:
      // This accepts complex or real maps.
      // Entry point: dispatches to the real or complex overload below via
      // the null 'select_overload' pointer (tag-dispatch on value_type).
      template <typename Tag, typename MapType>
      void
      transform(
        select_sign<Tag> tag,
        MapType map,
        real_type* scratch)
      {
        typedef typename MapType::value_type real_or_complex_type;
        real_or_complex_type const* select_overload = 0;
        transform(tag, map, scratch, select_overload);
      }

      // Cast map of real to map of complex.
      // Reinterprets pairs of adjacent reals in the second dimension as one
      // complex value, then forwards to the complex core routine.
      template <typename Tag, typename MapType>
      void
      transform(
        select_sign<Tag> tag,
        MapType map,
        real_type* scratch,
        real_type const* select_overload)
      {
        SCITBX_ASSERT(select_overload == 0);
        typedef typename MapType::accessor_type accessor_type;
        accessor_type dim_real(map.accessor());
        // Two reals pair up into one complex, so dim 1 must be even.
        if (dim_real[1] % 2 != 0) {
          throw error("Number of elements in second dimension must be even.");
        }
        af::ref<complex_type, accessor_type> cmap(
          reinterpret_cast<complex_type*>(map.begin()),
          accessor_type(dim_real[0], dim_real[1] / 2));
        complex_type const* select_complex_overload = 0;
        transform(tag, cmap, scratch, select_complex_overload);
      }

      // Core routine always works on complex maps.
      // Pass 1: FFT along dimension 0 for every column iz (gathered into a
      // contiguous 'seq' buffer).  Pass 2: FFT along dimension 1 row by row.
      template <typename Tag, typename MapType>
      void
      transform(
        select_sign<Tag> tag,
        MapType map,
        real_type* scratch,
        complex_type const* select_overload)
      {
        // FUTURE: move out of class body
        {
          if (select_overload != 0) {
            // Unreachable.
            // Trick to avoid g++ 4.4.0 warnings when compiling with -fopenmp.
            throw std::runtime_error(__FILE__);
          }
          int ny = fft1d_[0].n();
          int nz = fft1d_[1].n();
          // Each thread needs seq_size reals of gather space plus seq_size
          // reals of 1-D-transform scratch.
          int seq_size = 2 * std::max(ny, nz);
          scitbx::auto_array<real_type> seq_and_scratch;
          // Presumably this pins the team size so the buffer allocated with
          // omp_get_num_threads() below matches the actual number of
          // threads -- NOTE(review): confirm.
          if (omp_in_parallel() == 0) omp_set_dynamic(0);
#if !defined(SCITBX_FFTPACK_COMPLEX_TO_COMPLEX_2D_NO_PRAGMA_OMP)
          #pragma omp parallel
#endif
          {
            int num_threads = omp_get_num_threads();
            int i_thread = omp_get_thread_num();
            // One thread allocates the combined per-thread workspace; the
            // implicit barrier at the end of 'single' makes it visible to all.
#if !defined(SCITBX_FFTPACK_COMPLEX_TO_COMPLEX_2D_NO_PRAGMA_OMP)
            #pragma omp single
#endif
            {
              seq_and_scratch = scitbx::auto_array<real_type>(
                new real_type[2 * seq_size * num_threads]);
            }
            // NOTE(review): this local deliberately shadows the 'scratch'
            // parameter, which is ignored by this overload.
            real_type* scratch = seq_and_scratch.get() + 2 * seq_size * i_thread;
            complex_type* seq = reinterpret_cast<complex_type*>(scratch + seq_size);
#if !defined(SCITBX_FFTPACK_COMPLEX_TO_COMPLEX_2D_NO_PRAGMA_OMP)
            #pragma omp for
#endif
            for (int iz = 0; iz < nz; iz++) {
              // Gather the non-contiguous column, transform it, scatter back.
              for (int iy = 0; iy < ny; iy++) {
                seq[iy] = map(iy, iz);
              }
              fft1d_[0].transform(tag, seq, scratch);
              for (int iy = 0; iy < ny; iy++) {
                map(iy, iz) = seq[iy];
              }
            }
#if !defined(SCITBX_FFTPACK_COMPLEX_TO_COMPLEX_2D_NO_PRAGMA_OMP)
            #pragma omp for
#endif
            // Rows are contiguous in memory, so they are transformed in place.
            for (int iy = 0; iy < ny; iy++) {
              fft1d_[1].transform(tag, &map(iy, 0), scratch);
            }
          }
        }
      }

    private:
      // One 1-D transform engine per dimension.
      af::tiny<complex_to_complex<real_type, complex_type>, 2> fft1d_;
  };
template <typename RealType, typename ComplexType>
complex_to_complex_2d<RealType, ComplexType
>::complex_to_complex_2d(const af::int2& n)
{
for(std::size_t i=0;i<2;i++) {
fft1d_[i] = complex_to_complex<real_type, complex_type>(n[i]);
}
}
template <typename RealType, typename ComplexType>
complex_to_complex_2d<RealType, ComplexType
>::complex_to_complex_2d(std::size_t n0, std::size_t n1)
{
fft1d_[0] = complex_to_complex<real_type, complex_type>(n0);
fft1d_[1] = complex_to_complex<real_type, complex_type>(n1);
}
}} // namespace scitbx::fftpack
#endif // SCITBX_FFTPACK_COMPLEX_TO_COMPLEX_2D_H
|
master_taskloop_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// Checks that -Wuninitialized still fires inside a 'master taskloop' region.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp master taskloop
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop'}}
#pragma omp master taskloop
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop'}}
#pragma omp master taskloop foo
// The bare directive must parse, and a non-for statement after it is rejected.
void test_no_clause() {
  int i;
#pragma omp master taskloop
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{statement after '#pragma omp master taskloop' must be a for loop}}
#pragma omp master taskloop
  ++i;
}
// Branches (goto/return) may not cross the taskloop region boundary in
// either direction; labels inside the region are invisible outside it.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp parallel
#pragma omp master taskloop
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown trailing tokens only warn; a duplicated 'nogroup' clause is an error.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop foo bar
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{directive '#pragma omp master taskloop' cannot contain more than one 'nogroup' clause}}
#pragma omp master taskloop nogroup nogroup
  for (i = 0; i < 16; ++i)
    ;
}
// Stray punctuation and misplaced clauses after the directive are ignored
// with warnings (and 'linear' is rejected as not allowed on taskloop).
void test_non_identifiers() {
  int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+3 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp master taskloop'}}
#pragma omp parallel
#pragma omp master taskloop linear(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop private(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Exercises parsing and semantic checks for the 'collapse' clause:
// malformed argument lists, non-constant/non-positive arguments, and the
// required number of perfectly nested loops.
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
#pragma omp master taskloop collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp master taskloop collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp master taskloop collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// Exercises parsing of the 'private' clause: malformed lists, non-variable
// arguments, and well-formed one/two/three-variable lists.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp master taskloop private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// Exercises parsing of the 'lastprivate' clause, mirroring test_private.
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Exercises parsing of 'firstprivate', including combining it with
// 'lastprivate' on the same variables.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// The loop iteration variable must be integer or pointer typed, and wider
// than 64 bits is narrowed with a warning.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp master taskloop
  for (__int128 ii = 0; ii < 10; ii++) {
    c[ii] = a[ii] + b[ii];
  }
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y, with tv_usec normalized into [0, 1000000).
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is used as working storage and is modified (classic GNU idiom).
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds when x's are smaller. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds (beyond one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the tiled order-1 3D 7-point stencil.
 * Usage: ./a.out Nx Ny Nz [Nt]  (interior sizes; a halo of 1 is added).
 * Runs the kernel TESTS times and reports the minimum wall time.
 */
int main(int argc, char *argv[])
{
  int i, j, k, test;
  /* FIX: the original read Nx/Ny/Nz/Nt uninitialized (undefined behavior)
   * when they were not supplied on the command line.  Provide defaults:
   * interior 32^3, 10 time steps. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time buffers (ping-pong on t%2), each Nz x Ny x Nx. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  /* Initialize both buffers over the FULL domain, including the halo
   * planes at index 0.  FIX: the original started these loops at 1 and
   * never touched A[1], so the stencil read uninitialized memory. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (glibc header license boilerplate accidentally inlined by the
     * code generator was removed here; it did not apply to this file.) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
    /* Start of CLooG code -- generated time/space tiled loop nest; the
     * t2 (wavefront) dimension is parallelized across threads. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,4);t1++) {
        lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
        ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(8*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(4*t1+Ny+5,16)),floord(8*t2+Ny+4,16)),floord(8*t1-8*t2+Nz+Ny+3,16));t3++) {
            for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(16*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(16*t3+Nx+12,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) {
              for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),16*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),16*t3+14),256*t4+254),8*t1-8*t2+Nz+5);t5++) {
                for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                    lbv=max(256*t4,t5+1);
                    ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Freeing the arrays here caused measurable timing perturbation, so it
  // is left disabled; the OS reclaims the memory at exit.
  /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
GB_binop__isge_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_int16
// A.*B function (eWiseMult): GB_AemultB__isge_int16
// A*D function (colscale): GB_AxD__isge_int16
// D*A function (rowscale): GB_DxB__isge_int16
// C+=B function (dense accum): GB_Cdense_accumB__isge_int16
// C+=b function (dense accum): GB_Cdense_accumb__isge_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_int16
// C=scalar+B GB_bind1st__isge_int16
// C=scalar+B' GB_bind1st_tran__isge_int16
// C=A+scalar GB_bind2nd__isge_int16
// C=A'+scalar GB_bind2nd_tran__isge_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_INT16 || GxB_NO_ISGE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the operator is fixed to
// ISGE on int16 via the GB_* macros above.  Auto-generated wrapper around
// the shared template.
GrB_Info GB_Cdense_ewise3_noaccum__isge_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out via GxB_NO_* flags.
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// precomputed ek_slice task partition (kfirst/klast/pstart per task).
GrB_Info GB_Cdense_accumB__isge_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out via GxB_NO_* flags.
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.
GrB_Info GB_Cdense_accumb__isge_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out via GxB_NO_* flags.
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // Unreachable: the block above always returns (generated-code artifact).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with the
// result written into Cx (C has the same pattern as A).
GrB_Info GB_AxD__isge_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out via GxB_NO_* flags.
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with the result
// written into Cx (C has the same pattern as B).
GrB_Info GB_DxB__isge_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out via GxB_NO_* flags.
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and
// B.  The ek_slice workspaces are freed by GB_FREE_ALL on exit.
GrB_Info GB_AaddB__isge_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out via GxB_NO_* flags.
    return (GrB_NO_VALUE) ;
    #else
    // Workspace slices; allocated by the template if needed, freed below.
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the ISGE (>=) operator; the work
// is done by the included emult template.
GrB_Info GB_AemultB__isge_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspace pointers; released by GB_FREE_ALL below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry present in B, with the scalar x
// bound as the first operand.
GrB_Info GB_bind1st__isge_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped input/output arrays
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Bx = (int16_t *) Bx_input ;
    // the scalar bound to the first operand of z = (x >= y)
    int16_t scalar = (*((int16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    { 
        // skip entries not present according to Bb
        if (GBB (Bb, k))
        { 
            Cx [k] = (scalar >= Bx [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry present in A, with the scalar y
// bound as the second operand.
GrB_Info GB_bind2nd__isge_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped input/output arrays
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    // the scalar bound to the second operand of z = (x >= y)
    int16_t scalar = (*((int16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    { 
        // skip entries not present according to Ab
        if (GBB (Ab, k))
        { 
            Cx [k] = (Ax [k] >= scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// per-entry kernel used by the transpose template: cij = (x >= aij)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = Ax [pA] ;             \
    Cx [pC] = (x >= aij) ;              \
}

// C = op (x, A'): transpose A and apply the ISGE operator with the scalar x
// bound as the first operand; the traversal is in GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__isge_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (code-generator boilerplate; never reached at runtime)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// per-entry kernel used by the transpose template: cij = (aij >= y)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = Ax [pA] ;             \
    Cx [pC] = (aij >= y) ;              \
}

// C = op (A', y): transpose A and apply the ISGE operator with the scalar y
// bound as the second operand; the traversal is in GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__isge_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
soma_clustering.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
//
// This model exemplifies the use of extracellular diffusion and shows
// how to extend the default "Cell". In step 0 one can see how an extra
// data member is added and can be accessed throughout the simulation with
// its Get and Set methods. N cells are randomly positioned in space, of which
// half are of type 1 and half of type -1. Each type secretes a different
// substance. Cells move towards the gradient of their own substance, which
// results in clusters being formed of cells of the same type.
//
#ifndef DEMO_SOMA_CLUSTERING_H_
#define DEMO_SOMA_CLUSTERING_H_
#include <vector>
#include "biodynamo.h"
#include "my_cell.h"
#include "validation_criterion.h"
namespace bdm {
namespace soma_clustering {
enum Substances { kSubstance0, kSubstance1 };
// Runs the soma clustering demo: two cell types (1 and -1) each secrete a
// distinct substance and chemotax toward their own substance's gradient,
// forming same-type clusters. Returns 0 if the clustering criterion is met
// (process exit-code convention), nonzero otherwise.
inline int Simulate(int argc, const char** argv) {
  auto set_param = [](Param* param) {
    // Create an artificial bound for the simulation space
    param->bound_space = Param::BoundSpaceMode::kClosed;
    param->min_bound = 0;
    param->max_bound = 250;
    // Cells move only by chemotaxis here; skip the mechanical-forces op.
    param->unschedule_default_operations = {"mechanical forces"};
  };
  Simulation simulation(argc, argv, set_param);

  // Define initial model
  auto* param = simulation.GetParam();
  int num_cells = 20000;

  // NOTE(review): executed inside an omp parallel region, presumably so
  // every thread's random generator gets the same seed — confirm.
#pragma omp parallel
  simulation.GetRandom()->SetSeed(4357);

  // Define the substances that cells may secrete
  // Order: substance id, substance_name, diffusion_coefficient, decay_constant,
  // and the resolution of the (finite difference) diffusion grid.
  ModelInitializer::DefineSubstance(kSubstance0, "Substance_0", 50.0, 0.1, 20);
  ModelInitializer::DefineSubstance(kSubstance1, "Substance_1", 50.0, 0.1, 20);

  // The lambda captures by reference, so reassigning cell_type /
  // substance_name below changes what subsequent calls construct.
  int cell_type = 1;
  std::string substance_name = "Substance_0";
  auto construct = [&cell_type, &substance_name](const Double3& position) {
    auto* cell = new MyCell(position, cell_type);
    cell->SetDiameter(10);
    cell->AddBehavior(new Secretion(substance_name));
    cell->AddBehavior(new Chemotaxis(substance_name, 5));
    return cell;
  };

  // Construct num_cells/2 cells of type 1 secreting Substance_0
  ModelInitializer::CreateAgentsRandom(param->min_bound, param->max_bound,
                                       num_cells / 2, construct);

  // Construct num_cells/2 cells of type -1 secreting Substance_1
  cell_type = -1;
  substance_name = "Substance_1";
  ModelInitializer::CreateAgentsRandom(param->min_bound, param->max_bound,
                                       num_cells / 2, construct);

  // Run simulation for N timesteps
  simulation.GetScheduler()->Simulate(1000);

  // Check if criterion is met
  double spatial_range = 5;
  auto crit = GetCriterion(spatial_range, num_cells / 8);
  if (crit) {
    std::cout << "Simulation completed successfully!\n";
  }
  return !crit;
}
} // namespace soma_clustering
} // namespace bdm
#endif // DEMO_SOMA_CLUSTERING_H_
|
distance_measure.h | #ifndef DISTANCE_H
#define DISTANCE_H
using namespace std;
// L1 (Manhattan) distance: sum of absolute per-element differences.
// Assumes both images have the same length — TODO confirm at call sites.
// `threads` is the number of OpenMP threads used for the reduction.
double manhattan_distance(const std::vector<int>& image1,
                          const std::vector<int>& image2, int threads) {
  double dist = 0.0;
  // Signed loop index keeps the canonical OpenMP loop form.
  const int n = static_cast<int>(image1.size());
#pragma omp parallel for reduction(+ : dist) num_threads(threads)
  for (int i = 0; i < n; i++) {
    dist += std::abs(image1[i] - image2[i]);
  }
  return dist;
}
// L-infinity (Chebyshev) distance: maximum absolute per-element difference.
// Assumes both images have the same length — TODO confirm at call sites.
// Fix: the original declared `double dist[size][1] = {0.0}`, an initialized
// variable-length array — VLAs cannot take initializers and are not standard
// C++ at all; a std::vector provides the per-element scratch safely.
double chebyshev_distance(const std::vector<int>& image1,
                          const std::vector<int>& image2, int threads) {
  const int n = static_cast<int>(image1.size());
  std::vector<double> diffs(n, 0.0);
  // Each iteration writes a distinct slot, so no reduction/lock is needed
  // (OpenMP max-reduction would require OpenMP 3.1+, as the original noted).
#pragma omp parallel for num_threads(threads)
  for (int i = 0; i < n; i++) {
    diffs[i] = std::abs(image1[i] - image2[i]);
  }
  // Serial pass to pick the maximum.
  double maxdist = 0.0;
  for (int i = 0; i < n; i++) {
    if (diffs[i] > maxdist) {
      maxdist = diffs[i];
    }
  }
  return maxdist;
}
// Hellinger distance: sqrt(sum((sqrt(p_i) - sqrt(q_i))^2)) / sqrt(2).
// Assumes both images have the same length and non-negative values
// (sqrt of a negative intensity would yield NaN) — TODO confirm.
double hellinger_distance(const std::vector<int>& image1,
                          const std::vector<int>& image2, int threads) {
  double dist = 0.0;
  const int n = static_cast<int>(image1.size());
#pragma omp parallel for reduction(+ : dist) num_threads(threads)
  for (int i = 0; i < n; i++) {
    // d * d instead of pow(d, 2): exact and avoids a libm call per element.
    const double d = std::sqrt(static_cast<double>(image1[i])) -
                     std::sqrt(static_cast<double>(image2[i]));
    dist += d * d;
  }
  return std::sqrt(dist) / std::sqrt(2.0);
}
// L2 (Euclidean) distance: sqrt of the sum of squared per-element differences.
// Assumes both images have the same length — TODO confirm at call sites.
double euclidean_distance(const std::vector<int>& image1,
                          const std::vector<int>& image2, int threads) {
  double dist = 0.0;
  const int n = static_cast<int>(image1.size());
#pragma omp parallel for reduction(+ : dist) num_threads(threads)
  for (int i = 0; i < n; i++) {
    // d * d instead of pow(d, 2): exact and avoids a libm call per element.
    const double d = static_cast<double>(image1[i] - image2[i]);
    dist += d * d;
  }
  return std::sqrt(dist);
}
#endif
|
TSDFVoxelGridImpl.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <atomic>
#include <cmath>
#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Logging.h"
#include "open3d/utility/Timer.h"
namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {
// Fuse one depth frame (and optionally a color frame) into the selected TSDF
// voxel blocks: every voxel of every indexed block is projected into the
// frame; its signed distance to the observed depth is truncated to
// [-sdf_trunc, sdf_trunc], normalized by sdf_trunc, and accumulated into the
// voxel via voxel_t::Integrate. Compiled as IntegrateCUDA under nvcc and
// IntegrateCPU otherwise.
#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
        (const core::Tensor& depth,
         const core::Tensor& color,
         const core::Tensor& indices,
         const core::Tensor& block_keys,
         core::Tensor& block_values,
         // Transforms
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         // Parameters
         int64_t resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_max) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);

    // Real data indexer
    NDArrayIndexer depth_indexer(depth, 2);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);

    // Optional color integration: only when a non-empty color image is given.
    NDArrayIndexer color_indexer;
    bool integrate_color = false;
    if (color.NumElements() != 0) {
        color_indexer = NDArrayIndexer(color, 2);
        integrate_color = true;
    }

    // Plain arrays that do not require indexers
    const int* indices_ptr = indices.GetDataPtr<int>();

    // One workload item per voxel of every indexed block.
    int64_t n = indices.GetLength() * resolution3;

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
#endif

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                launcher::ParallelFor(n, [=] OPEN3D_DEVICE(
                                                 int64_t workload_idx) {
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int block_idx = indices_ptr[workload_idx / resolution3];
                    int voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtr<int>(block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // coordinate in world (in voxel)
                    int64_t x = (xb * resolution + xv);
                    int64_t y = (yb * resolution + yv);
                    int64_t z = (zb * resolution + zv);

                    // coordinate in camera (in voxel -> in meter)
                    float xc, yc, zc, u, v;
                    transform_indexer.RigidTransform(
                            static_cast<float>(x), static_cast<float>(y),
                            static_cast<float>(z), &xc, &yc, &zc);

                    // coordinate in image (in pixel)
                    transform_indexer.Project(xc, yc, zc, &u, &v);
                    if (!depth_indexer.InBoundary(u, v)) {
                        return;
                    }

                    // Associate image workload and compute SDF and TSDF.
                    float depth = *depth_indexer.GetDataPtr<float>(
                                          static_cast<int64_t>(u),
                                          static_cast<int64_t>(v)) /
                                  depth_scale;

                    float sdf = (depth - zc);
                    // Reject invalid depth, out-of-range depth, voxels behind
                    // the camera, and voxels far behind the surface.
                    if (depth <= 0 || depth > depth_max || zc <= 0 ||
                        sdf < -sdf_trunc) {
                        return;
                    }
                    // Truncate and normalize the SDF to [-1, 1].
                    sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
                    sdf /= sdf_trunc;

                    // Associate voxel workload and update TSDF/Weights
                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                    xv, yv, zv, block_idx);

                    if (integrate_color) {
                        float* color_ptr = color_indexer.GetDataPtr<float>(
                                static_cast<int64_t>(u),
                                static_cast<int64_t>(v));

                        voxel_ptr->Integrate(sdf, color_ptr[0], color_ptr[1],
                                             color_ptr[2]);
                    } else {
                        voxel_ptr->Integrate(sdf);
                    }
                });
            });
#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}
// Extract surface points (and optionally normals/colors) from the TSDF voxel
// grid: a surface sample is emitted wherever the TSDF changes sign between a
// voxel and its +x/+y/+z neighbor, linearly interpolated to the zero
// crossing. If valid_size < 0, a first counting pass estimates the output
// size. On return, valid_size holds the number of points written. Compiled
// as ExtractSurfacePointsCUDA under nvcc and ExtractSurfacePointsCPU
// otherwise.
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& points,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& valid_size) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that do not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();

    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;

    // Output point counter: a device tensor on CUDA, a std::atomic on CPU.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
#endif

    if (valid_size < 0) {
        utility::LogWarning(
                "No estimated max point cloud size provided, using a 2-pass "
                "estimation. Surface extraction could be slow.");
        // This pass determines valid number of points.
        DISPATCH_BYTESIZE_TO_VOXEL(
                voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                    launcher::ParallelFor(n, [=] OPEN3D_DEVICE(
                                                     int64_t workload_idx) {
                        // Fetch a voxel by its offset from the current block,
                        // resolving block-boundary crossings via neighbors.
                        auto GetVoxelAt =
                                [&] OPEN3D_DEVICE(
                                        int xo, int yo, int zo,
                                        int curr_block_idx) -> voxel_t* {
                            return DeviceGetVoxelAt<voxel_t>(
                                    xo, yo, zo, curr_block_idx,
                                    static_cast<int>(resolution),
                                    nb_block_masks_indexer,
                                    nb_block_indices_indexer,
                                    voxel_block_buffer_indexer);
                        };

                        // Natural index (0, N) -> (block_idx,
                        // voxel_idx)
                        int64_t workload_block_idx = workload_idx / resolution3;
                        int64_t block_idx = indices_ptr[workload_block_idx];
                        int64_t voxel_idx = workload_idx % resolution3;

                        // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                        int64_t xv, yv, zv;
                        voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                        voxel_t* voxel_ptr =
                                voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                        xv, yv, zv, block_idx);
                        float tsdf_o = voxel_ptr->GetTSDF();
                        float weight_o = voxel_ptr->GetWeight();
                        if (weight_o <= weight_threshold) return;

                        // Enumerate x-y-z directions
                        for (int i = 0; i < 3; ++i) {
                            voxel_t* ptr = GetVoxelAt(
                                    static_cast<int>(xv) + (i == 0),
                                    static_cast<int>(yv) + (i == 1),
                                    static_cast<int>(zv) + (i == 2),
                                    static_cast<int>(workload_block_idx));
                            if (ptr == nullptr) continue;

                            float tsdf_i = ptr->GetTSDF();
                            float weight_i = ptr->GetWeight();

                            // A TSDF sign change between neighbors marks a
                            // surface crossing: count one point per crossing.
                            if (weight_i > weight_threshold &&
                                tsdf_i * tsdf_o < 0) {
                                OPEN3D_ATOMIC_ADD(count_ptr, 1);
                            }
                        }
                    });
                });

#if defined(__CUDACC__)
        valid_size = count[0].Item<int>();
        count[0] = 0;
#else
        valid_size = (*count_ptr).load();
        (*count_ptr) = 0;
#endif
    }

    int max_count = valid_size;
    if (points.GetLength() == 0) {
        points = core::Tensor({max_count, 3}, core::Dtype::Float32,
                              block_values.GetDevice());
    }
    NDArrayIndexer point_indexer(points, 1);

    // Normals
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        if (normals.value().get().GetLength() == 0) {
            normals.value().get() =
                    core::Tensor({max_count, 3}, core::Dtype::Float32,
                                 block_values.GetDevice());
        }
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    // This pass extracts exact surface points.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                // Colors: only extracted when the voxel type stores them and
                // the caller asked for them.
                bool extract_color = false;
                NDArrayIndexer color_indexer;
                if (voxel_t::HasColor() && colors.has_value()) {
                    extract_color = true;
                    if (colors.value().get().GetLength() == 0) {
                        colors.value().get() = core::Tensor(
                                {max_count, 3}, core::Dtype::Float32,
                                block_values.GetDevice());
                    }
                    color_indexer = NDArrayIndexer(colors.value().get(), 1);
                }
                launcher::ParallelFor(n, [=] OPEN3D_DEVICE(
                                                 int64_t workload_idx) {
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };
                    auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
                                                         int curr_block_idx,
                                                         float* n) {
                        return DeviceGetNormalAt<voxel_t>(
                                xo, yo, zo, curr_block_idx, n,
                                static_cast<int>(resolution), voxel_size,
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = workload_idx / resolution3;
                    int64_t block_idx = indices_ptr[workload_block_idx];
                    int64_t voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtr<int>(block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                    xv, yv, zv, block_idx);
                    float tsdf_o = voxel_ptr->GetTSDF();
                    float weight_o = voxel_ptr->GetWeight();

                    if (weight_o <= weight_threshold) return;

                    // global coordinate (in voxels)
                    int64_t x = xb * resolution + xv;
                    int64_t y = yb * resolution + yv;
                    int64_t z = zb * resolution + zv;

                    float no[3] = {0}, ni[3] = {0};
                    if (extract_normal) {
                        GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
                                    static_cast<int>(zv),
                                    static_cast<int>(workload_block_idx), no);
                    }

                    // Enumerate x-y-z axis
                    for (int i = 0; i < 3; ++i) {
                        voxel_t* ptr = GetVoxelAt(
                                static_cast<int>(xv) + (i == 0),
                                static_cast<int>(yv) + (i == 1),
                                static_cast<int>(zv) + (i == 2),
                                static_cast<int>(workload_block_idx));
                        if (ptr == nullptr) continue;

                        float tsdf_i = ptr->GetTSDF();
                        float weight_i = ptr->GetWeight();

                        if (weight_i > weight_threshold &&
                            tsdf_i * tsdf_o < 0) {
                            // Linear interpolation factor toward the
                            // zero crossing along axis i.
                            float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o);

                            int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                            if (idx >= valid_size) {
                                printf("Point cloud size larger than "
                                       "estimated, please increase the "
                                       "estimation!\n");
                                return;
                            }

                            float* point_ptr =
                                    point_indexer.GetDataPtr<float>(idx);
                            point_ptr[0] =
                                    voxel_size * (x + ratio * int(i == 0));
                            point_ptr[1] =
                                    voxel_size * (y + ratio * int(i == 1));
                            point_ptr[2] =
                                    voxel_size * (z + ratio * int(i == 2));

                            if (extract_color) {
                                float* color_ptr =
                                        color_indexer.GetDataPtr<float>(idx);

                                float r_o = voxel_ptr->GetR();
                                float g_o = voxel_ptr->GetG();
                                float b_o = voxel_ptr->GetB();

                                float r_i = ptr->GetR();
                                float g_i = ptr->GetG();
                                float b_i = ptr->GetB();

                                // Interpolate colors and normalize to [0, 1].
                                color_ptr[0] =
                                        ((1 - ratio) * r_o + ratio * r_i) /
                                        255.0f;
                                color_ptr[1] =
                                        ((1 - ratio) * g_o + ratio * g_i) /
                                        255.0f;
                                color_ptr[2] =
                                        ((1 - ratio) * b_o + ratio * b_i) /
                                        255.0f;
                            }

                            if (extract_normal) {
                                GetNormalAt(
                                        static_cast<int>(xv) + (i == 0),
                                        static_cast<int>(yv) + (i == 1),
                                        static_cast<int>(zv) + (i == 2),
                                        static_cast<int>(workload_block_idx),
                                        ni);

                                // Interpolate the two endpoint normals, then
                                // renormalize (epsilon avoids divide-by-zero).
                                float* normal_ptr =
                                        normal_indexer.GetDataPtr<float>(idx);
                                float nx = (1 - ratio) * no[0] + ratio * ni[0];
                                float ny = (1 - ratio) * no[1] + ratio * ni[1];
                                float nz = (1 - ratio) * no[2] + ratio * ni[2];
                                float norm = static_cast<float>(
                                        sqrt(nx * nx + ny * ny + nz * nz) +
                                        1e-5);
                                normal_ptr[0] = nx / norm;
                                normal_ptr[1] = ny / norm;
                                normal_ptr[2] = nz / norm;
                            }
                        }
                    }
                });
            });
#if defined(__CUDACC__)
    int total_count = count.Item<int>();
#else
    int total_count = (*count_ptr).load();
#endif

    utility::LogDebug("{} vertices extracted", total_count);
    valid_size = total_count;

#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}
#if defined(__CUDACC__)
void ExtractSurfaceMeshCUDA
#else
void ExtractSurfaceMeshCPU
#endif
(const core::Tensor& indices,
const core::Tensor& inv_indices,
const core::Tensor& nb_indices,
const core::Tensor& nb_masks,
const core::Tensor& block_keys,
const core::Tensor& block_values,
core::Tensor& vertices,
core::Tensor& triangles,
utility::optional<std::reference_wrapper<core::Tensor>> normals,
utility::optional<std::reference_wrapper<core::Tensor>> colors,
int64_t resolution,
float voxel_size,
float weight_threshold,
int& vertex_count) {
int64_t resolution3 = resolution * resolution * resolution;
// Shape / transform indexers, no data involved
NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
int n_blocks = static_cast<int>(indices.GetLength());
#if defined(__CUDACC__)
core::CUDACachedMemoryManager::ReleaseCache();
#endif
// TODO(wei): profile performance by replacing the table to a hashmap.
// Voxel-wise mesh info. 4 channels correspond to:
// 3 edges' corresponding vertex index + 1 table index.
core::Tensor mesh_structure;
try {
mesh_structure = core::Tensor::Zeros(
{n_blocks, resolution, resolution, resolution, 4},
core::Dtype::Int32, block_keys.GetDevice());
} catch (const std::runtime_error&) {
utility::LogError(
"[MeshExtractionKernel] Unable to allocate assistance mesh "
"structure for Marching "
"Cubes with {} active voxel blocks. Please consider using a "
"larger voxel size (currently {}) for TSDF "
"integration, or using tsdf_volume.cpu() to perform mesh "
"extraction on CPU.",
n_blocks, voxel_size);
}
// Real data indexer
NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
NDArrayIndexer mesh_structure_indexer(mesh_structure, 4);
NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);
// Plain arrays that does not require indexers
const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>();
int64_t n = n_blocks * resolution3;
#if defined(__CUDACC__)
namespace launcher = core::kernel::cuda_launcher;
#else
namespace launcher = core::kernel::cpu_launcher;
#endif
int64_t voxel_bytesize = voxel_block_buffer_indexer.ElementByteSize();
// Pass 0: analyze mesh structure, set up one-on-one correspondences
// from edges to vertices.
DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) {
auto GetVoxelAt = [&] OPEN3D_DEVICE(
int xo, int yo, int zo,
int curr_block_idx) -> voxel_t* {
return DeviceGetVoxelAt<voxel_t>(
xo, yo, zo, curr_block_idx,
static_cast<int>(resolution), nb_block_masks_indexer,
nb_block_indices_indexer, voxel_block_buffer_indexer);
};
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t voxel_idx = widx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// Check per-vertex sign in the cube to determine cube
// type
int table_idx = 0;
for (int i = 0; i < 8; ++i) {
voxel_t* voxel_ptr_i =
GetVoxelAt(static_cast<int>(xv) + vtx_shifts[i][0],
static_cast<int>(yv) + vtx_shifts[i][1],
static_cast<int>(zv) + vtx_shifts[i][2],
static_cast<int>(workload_block_idx));
if (voxel_ptr_i == nullptr) return;
float tsdf_i = voxel_ptr_i->GetTSDF();
float weight_i = voxel_ptr_i->GetWeight();
if (weight_i <= weight_threshold) return;
table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
}
int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
mesh_struct_ptr[3] = table_idx;
if (table_idx == 0 || table_idx == 255) return;
// Check per-edge sign determine the cube type
int edges_with_vertices = edge_table[table_idx];
for (int i = 0; i < 12; ++i) {
if (edges_with_vertices & (1 << i)) {
int64_t xv_i = xv + edge_shifts[i][0];
int64_t yv_i = yv + edge_shifts[i][1];
int64_t zv_i = zv + edge_shifts[i][2];
int edge_i = edge_shifts[i][3];
int dxb = static_cast<int>(xv_i / resolution);
int dyb = static_cast<int>(yv_i / resolution);
int dzb = static_cast<int>(zv_i / resolution);
int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
int64_t block_idx_i =
*nb_block_indices_indexer.GetDataPtr<int64_t>(
workload_block_idx, nb_idx);
int* mesh_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
xv_i - dxb * resolution, yv_i - dyb * resolution,
zv_i - dzb * resolution,
inv_indices_ptr[block_idx_i]);
// Non-atomic write, but we are safe
mesh_ptr_i[edge_i] = -1;
}
}
});
});
// Pass 1: determine valid number of vertices (if not preset)
#if defined(__CUDACC__)
core::Tensor count(std::vector<int>{0}, {}, core::Dtype::Int32,
block_values.GetDevice());
int* count_ptr = count.GetDataPtr<int>();
#else
std::atomic<int> count_atomic(0);
std::atomic<int>* count_ptr = &count_atomic;
#endif
if (vertex_count < 0) {
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) {
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t voxel_idx = widx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// Obtain voxel's mesh struct ptr
int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
// Early quit -- no allocated vertex to compute
if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
mesh_struct_ptr[2] != -1) {
return;
}
// Enumerate 3 edges in the voxel
for (int e = 0; e < 3; ++e) {
int vertex_idx = mesh_struct_ptr[e];
if (vertex_idx != -1) continue;
OPEN3D_ATOMIC_ADD(count_ptr, 1);
}
});
#if defined(__CUDACC__)
vertex_count = count.Item<int>();
#else
vertex_count = (*count_ptr).load();
#endif
}
utility::LogDebug("Total vertex count = {}", vertex_count);
vertices = core::Tensor({vertex_count, 3}, core::Dtype::Float32,
block_values.GetDevice());
bool extract_normal = false;
NDArrayIndexer normal_indexer;
if (normals.has_value()) {
extract_normal = true;
normals.value().get() =
core::Tensor({vertex_count, 3}, core::Dtype::Float32,
block_values.GetDevice());
normal_indexer = NDArrayIndexer(normals.value().get(), 1);
}
NDArrayIndexer block_keys_indexer(block_keys, 1);
NDArrayIndexer vertex_indexer(vertices, 1);
#if defined(__CUDACC__)
count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32,
block_values.GetDevice());
count_ptr = count.GetDataPtr<int>();
#else
(*count_ptr) = 0;
#endif
// Pass 2: extract vertices.
DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
bool extract_color = false;
NDArrayIndexer color_indexer;
if (voxel_t::HasColor() && colors.has_value()) {
extract_color = true;
colors.value().get() =
core::Tensor({vertex_count, 3}, core::Dtype::Float32,
block_values.GetDevice());
color_indexer = NDArrayIndexer(colors.value().get(), 1);
}
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) {
auto GetVoxelAt = [&] OPEN3D_DEVICE(
int xo, int yo, int zo,
int curr_block_idx) -> voxel_t* {
return DeviceGetVoxelAt<voxel_t>(
xo, yo, zo, curr_block_idx,
static_cast<int>(resolution), nb_block_masks_indexer,
nb_block_indices_indexer, voxel_block_buffer_indexer);
};
auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
int curr_block_idx, float* n) {
return DeviceGetNormalAt<voxel_t>(
xo, yo, zo, curr_block_idx, n,
static_cast<int>(resolution), voxel_size,
nb_block_masks_indexer, nb_block_indices_indexer,
voxel_block_buffer_indexer);
};
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t block_idx = indices_ptr[workload_block_idx];
int64_t voxel_idx = widx % resolution3;
// block_idx -> (x_block, y_block, z_block)
int* block_key_ptr = block_keys_indexer.GetDataPtr<int>(block_idx);
int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
int64_t zb = static_cast<int64_t>(block_key_ptr[2]);
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// global coordinate (in voxels)
int64_t x = xb * resolution + xv;
int64_t y = yb * resolution + yv;
int64_t z = zb * resolution + zv;
// Obtain voxel's mesh struct ptr
int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
// Early quit -- no allocated vertex to compute
if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
mesh_struct_ptr[2] != -1) {
return;
}
// Obtain voxel ptr
voxel_t* voxel_ptr = voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
xv, yv, zv, block_idx);
float tsdf_o = voxel_ptr->GetTSDF();
float no[3] = {0}, ne[3] = {0};
if (extract_normal) {
GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
static_cast<int>(zv),
static_cast<int>(workload_block_idx), no);
}
// Enumerate 3 edges in the voxel
for (int e = 0; e < 3; ++e) {
int vertex_idx = mesh_struct_ptr[e];
if (vertex_idx != -1) continue;
voxel_t* voxel_ptr_e =
GetVoxelAt(static_cast<int>(xv) + (e == 0),
static_cast<int>(yv) + (e == 1),
static_cast<int>(zv) + (e == 2),
static_cast<int>(workload_block_idx));
OPEN3D_ASSERT(voxel_ptr_e != nullptr &&
"Internal error: GetVoxelAt returns nullptr.");
float tsdf_e = voxel_ptr_e->GetTSDF();
float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o);
int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
mesh_struct_ptr[e] = idx;
float ratio_x = ratio * int(e == 0);
float ratio_y = ratio * int(e == 1);
float ratio_z = ratio * int(e == 2);
float* vertex_ptr = vertex_indexer.GetDataPtr<float>(idx);
vertex_ptr[0] = voxel_size * (x + ratio_x);
vertex_ptr[1] = voxel_size * (y + ratio_y);
vertex_ptr[2] = voxel_size * (z + ratio_z);
if (extract_normal) {
float* normal_ptr = normal_indexer.GetDataPtr<float>(idx);
GetNormalAt(static_cast<int>(xv) + (e == 0),
static_cast<int>(yv) + (e == 1),
static_cast<int>(zv) + (e == 2),
static_cast<int>(workload_block_idx), ne);
float nx = (1 - ratio) * no[0] + ratio * ne[0];
float ny = (1 - ratio) * no[1] + ratio * ne[1];
float nz = (1 - ratio) * no[2] + ratio * ne[2];
float norm = static_cast<float>(
sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
normal_ptr[0] = nx / norm;
normal_ptr[1] = ny / norm;
normal_ptr[2] = nz / norm;
}
if (extract_color) {
float* color_ptr = color_indexer.GetDataPtr<float>(idx);
float r_o = voxel_ptr->GetR();
float g_o = voxel_ptr->GetG();
float b_o = voxel_ptr->GetB();
float r_e = voxel_ptr_e->GetR();
float g_e = voxel_ptr_e->GetG();
float b_e = voxel_ptr_e->GetB();
color_ptr[0] = ((1 - ratio) * r_o + ratio * r_e) / 255.0f;
color_ptr[1] = ((1 - ratio) * g_o + ratio * g_e) / 255.0f;
color_ptr[2] = ((1 - ratio) * b_o + ratio * b_e) / 255.0f;
}
}
});
});
// Pass 3: connect vertices and form triangles.
int triangle_count = vertex_count * 3;
triangles = core::Tensor({triangle_count, 3}, core::Dtype::Int64,
block_values.GetDevice());
NDArrayIndexer triangle_indexer(triangles, 1);
#if defined(__CUDACC__)
count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32,
block_values.GetDevice());
count_ptr = count.GetDataPtr<int>();
#else
(*count_ptr) = 0;
#endif
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) {
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t voxel_idx = widx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// Obtain voxel's mesh struct ptr
int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
int table_idx = mesh_struct_ptr[3];
if (tri_count[table_idx] == 0) return;
for (size_t tri = 0; tri < 16; tri += 3) {
if (tri_table[table_idx][tri] == -1) return;
int tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
for (size_t vertex = 0; vertex < 3; ++vertex) {
int edge = tri_table[table_idx][tri + vertex];
int64_t xv_i = xv + edge_shifts[edge][0];
int64_t yv_i = yv + edge_shifts[edge][1];
int64_t zv_i = zv + edge_shifts[edge][2];
int64_t edge_i = edge_shifts[edge][3];
int dxb = static_cast<int>(xv_i / resolution);
int dyb = static_cast<int>(yv_i / resolution);
int dzb = static_cast<int>(zv_i / resolution);
int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
int64_t block_idx_i =
*nb_block_indices_indexer.GetDataPtr<int64_t>(
workload_block_idx, nb_idx);
int* mesh_struct_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
xv_i - dxb * resolution, yv_i - dyb * resolution,
zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]);
int64_t* triangle_ptr =
triangle_indexer.GetDataPtr<int64_t>(tri_idx);
triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i];
}
}
});
#if defined(__CUDACC__)
triangle_count = count.Item<int>();
#else
triangle_count = (*count_ptr).load();
#endif
utility::LogInfo("Total triangle count = {}", triangle_count);
triangles = triangles.Slice(0, 0, triangle_count);
}
#if defined(__CUDACC__)
void EstimateRangeCUDA
#else
void EstimateRangeCPU
#endif
        (const core::Tensor& block_keys,
         core::Tensor& range_minmax_map,
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         int h,
         int w,
         int down_factor,
         int64_t block_resolution,
         float voxel_size,
         float depth_min,
         float depth_max) {
    // Estimates, per down-sampled pixel, the [min, max] depth range covered by
    // the active voxel blocks. Three passes:
    //   0:   project each block's 8 corners, derive a screen-space rectangle,
    //        split it into 16x16 fragments appended to a shared buffer;
    //   0.5: reset the range map to (depth_max, depth_min);
    //   1:   rasterize the fragments with atomic min/max into the range map.

    // TODO(wei): reserve it in a reusable buffer

    // Every 2 channels: (min, max)
    int h_down = h / down_factor;
    int w_down = w / down_factor;
    range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Dtype::Float32,
                                    block_keys.GetDevice());
    NDArrayIndexer range_map_indexer(range_minmax_map, 2);

    // Every 6 channels: (v_min, u_min, v_max, u_max, z_min, z_max)
    const int fragment_size = 16;
    const int frag_buffer_size = 65535;

    // TODO(wei): explicit buffer
    core::Tensor fragment_buffer =
            core::Tensor({frag_buffer_size, 6}, core::Dtype::Float32,
                         block_keys.GetDevice());

    NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_keys.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
    using std::max;
    using std::min;
#endif

    // Pass 0: iterate over blocks, fill-in an rendering fragment array
    launcher::ParallelFor(
            block_keys.GetLength(), [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int* key = block_keys_indexer.GetDataPtr<int>(workload_idx);

                int u_min = w_down - 1, v_min = h_down - 1, u_max = 0,
                    v_max = 0;
                float z_min = depth_max, z_max = depth_min;

                float xc, yc, zc, u, v;

                // Project 8 corners to low-res image and form a rectangle
                for (int i = 0; i < 8; ++i) {
                    float xw = (key[0] + ((i & 1) > 0)) * block_resolution *
                               voxel_size;
                    float yw = (key[1] + ((i & 2) > 0)) * block_resolution *
                               voxel_size;
                    float zw = (key[2] + ((i & 4) > 0)) * block_resolution *
                               voxel_size;

                    w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc,
                                                         &zc);
                    // Corner behind the camera contributes nothing.
                    if (zc <= 0) continue;

                    // Project to the down sampled image buffer
                    w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
                    u /= down_factor;
                    v /= down_factor;

                    v_min = min(static_cast<int>(floorf(v)), v_min);
                    v_max = max(static_cast<int>(ceilf(v)), v_max);

                    u_min = min(static_cast<int>(floorf(u)), u_min);
                    u_max = max(static_cast<int>(ceilf(u)), u_max);

                    z_min = min(z_min, zc);
                    z_max = max(z_max, zc);
                }

                // Clamp the rectangle to the image; an empty/degenerate
                // rectangle (block fully outside the view) is skipped.
                v_min = max(0, v_min);
                v_max = min(h_down - 1, v_max);

                u_min = max(0, u_min);
                u_max = min(w_down - 1, u_max);

                if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;

                // Divide the rectangle into small 16x16 fragments
                int frag_v_count =
                        ceilf(float(v_max - v_min + 1) / float(fragment_size));
                int frag_u_count =
                        ceilf(float(u_max - u_min + 1) / float(fragment_size));

                int frag_count = frag_v_count * frag_u_count;
                // BUGFIX: reserve all frag_count slots in one atomic add.
                // The original added only 1, so concurrent blocks received
                // overlapping slot ranges and overwrote each other's
                // fragments, and the final counter undercounted fragments.
                int frag_count_start =
                        OPEN3D_ATOMIC_ADD(count_ptr, frag_count);
                int frag_count_end = frag_count_start + frag_count;
                if (frag_count_end >= frag_buffer_size) {
                    printf("Fragment count exceeding buffer size, abort!\n");
                    // BUGFIX: skip writing instead of overflowing the buffer.
                    return;
                }

                int offset = 0;
                for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
                    for (int frag_u = 0; frag_u < frag_u_count;
                         ++frag_u, ++offset) {
                        float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(
                                frag_count_start + offset);
                        // zmin, zmax
                        frag_ptr[0] = z_min;
                        frag_ptr[1] = z_max;

                        // vmin, umin
                        frag_ptr[2] = v_min + frag_v * fragment_size;
                        frag_ptr[3] = u_min + frag_u * fragment_size;

                        // vmax, umax (clip the last fragment row/column)
                        frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1,
                                          static_cast<float>(v_max));
                        frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
                                          static_cast<float>(u_max));
                    }
                }
            });
#if defined(__CUDACC__)
    int frag_count = count[0].Item<int>();
#else
    int frag_count = (*count_ptr).load();
#endif
    // Guard pass 1 against counter values beyond the buffer capacity
    // (possible when the overflow warning above fired; those trailing
    // slots were never written and must not be rasterized).
    frag_count = frag_count < frag_buffer_size ? frag_count : frag_buffer_size;

    // Pass 0.5: Fill in range map to prepare for atomic min/max
    launcher::ParallelFor(
            h_down * w_down, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int v = workload_idx / w_down;
                int u = workload_idx % w_down;
                float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
                range_ptr[0] = depth_max;
                range_ptr[1] = depth_min;
            });

    // Pass 1: iterate over rendering fragment array, fill-in range
    launcher::ParallelFor(
            frag_count * fragment_size * fragment_size,
            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int frag_idx = workload_idx / (fragment_size * fragment_size);
                int local_idx = workload_idx % (fragment_size * fragment_size);
                int dv = local_idx / fragment_size;
                int du = local_idx % fragment_size;

                float* frag_ptr =
                        frag_buffer_indexer.GetDataPtr<float>(frag_idx);
                int v_min = static_cast<int>(frag_ptr[2]);
                int u_min = static_cast<int>(frag_ptr[3]);
                int v_max = static_cast<int>(frag_ptr[4]);
                int u_max = static_cast<int>(frag_ptr[5]);

                int v = v_min + dv;
                int u = u_min + du;
                if (v > v_max || u > u_max) return;

                float z_min = frag_ptr[0];
                float z_max = frag_ptr[1];
                float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
#ifdef __CUDACC__
                atomicMinf(&(range_ptr[0]), z_min);
                atomicMaxf(&(range_ptr[1]), z_max);
#else
#pragma omp critical(EstimateRangeCPU)
                {
                    range_ptr[0] = min(z_min, range_ptr[0]);
                    range_ptr[1] = max(z_max, range_ptr[1]);
                }
#endif
            });
#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}
// One-entry cache that memoizes the most recent hashmap lookup of a voxel
// block key (x, y, z) -> block index, avoiding repeated hash queries while a
// ray stays inside (or near) the same block.
struct BlockCache {
    int x;
    int y;
    int z;
    int block_idx;

    // Returns the cached block index when (xin, yin, zin) matches the cached
    // key, otherwise -1 (cache miss).
    inline int OPEN3D_DEVICE Check(int xin, int yin, int zin) {
        if (xin == x && yin == y && zin == z) {
            return block_idx;
        }
        return -1;
    }

    // Records (xin, yin, zin) -> block_idx_in as the most recent lookup.
    inline void OPEN3D_DEVICE Update(int xin,
                                     int yin,
                                     int zin,
                                     int block_idx_in) {
        x = xin;
        y = yin;
        z = zin;
        block_idx = block_idx_in;
    }
};
// Per-pixel ray marching through the sparse TSDF voxel-block volume.
// For every pixel, marches a camera ray from the precomputed [min, max]
// depth range (range_map) until the TSDF crosses from positive to negative,
// then writes the requested outputs (depth / vertex / color / normal maps).
// Color and normal at the surface point are trilinearly interpolated from
// the 8 neighboring voxels.
#if defined(__CUDACC__)
void RayCastCUDA
#else
void RayCastCPU
#endif
        (std::shared_ptr<core::DeviceHashmap>& hashmap,
         const core::Tensor& block_values,
         const core::Tensor& range_map,
         core::Tensor& vertex_map,
         core::Tensor& depth_map,
         core::Tensor& color_map,
         core::Tensor& normal_map,
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         int h,
         int w,
         int64_t block_resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_min,
         float depth_max,
         float weight_threshold) {
    using Key = core::Block<int, 3>;
    using Hash = core::BlockHash<int, 3>;

    // Unwrap the device hashmap to its raw (device-callable) implementation.
    // CUDA builds require the stdgpu backend; CPU builds use TBB.
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    auto cuda_hashmap =
            std::dynamic_pointer_cast<core::StdGPUHashmap<Key, Hash>>(hashmap);
    if (cuda_hashmap == nullptr) {
        utility::LogError(
                "Unsupported backend: CUDA raycasting only supports STDGPU.");
    }
    auto hashmap_impl = cuda_hashmap->GetImpl();
#else
    auto cpu_hashmap =
            std::dynamic_pointer_cast<core::TBBHashmap<Key, Hash>>(hashmap);
    auto hashmap_impl = *cpu_hashmap->GetImpl();
#endif

    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer range_map_indexer(range_map, 2);

    NDArrayIndexer vertex_map_indexer;
    NDArrayIndexer depth_map_indexer;
    NDArrayIndexer color_map_indexer;
    NDArrayIndexer normal_map_indexer;

    // Outputs are optional: a zero-length tensor means "not requested".
    bool enable_vertex = (vertex_map.GetLength() != 0);
    bool enable_depth = (depth_map.GetLength() != 0);
    bool enable_color = (color_map.GetLength() != 0);
    bool enable_normal = (normal_map.GetLength() != 0);

    if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) {
        utility::LogWarning("No output specified for ray casting, exit.");
        return;
    }

    if (enable_vertex) {
        vertex_map_indexer = NDArrayIndexer(vertex_map, 2);
    }
    if (enable_depth) {
        depth_map_indexer = NDArrayIndexer(depth_map, 2);
    }
    if (enable_color) {
        color_map_indexer = NDArrayIndexer(color_map, 2);
    }
    if (enable_normal) {
        normal_map_indexer = NDArrayIndexer(normal_map, 2);
    }

    // c2w: camera -> world (for generating rays); w2c: world -> camera
    // (for writing camera-frame vertices/normals).
    TransformIndexer c2w_transform_indexer(
            intrinsics, t::geometry::InverseTransformation(extrinsics));
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);

    int64_t rows = h;
    int64_t cols = w;

    float block_size = voxel_size * block_resolution;
#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
    using std::max;
#endif

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                launcher::ParallelFor(rows * cols, [=] OPEN3D_DEVICE(
                                                           int64_t workload_idx) {
                    // Fetch a voxel by block + (possibly out-of-block) voxel
                    // coordinate: in-bounds voxels read directly from
                    // block_addr; out-of-bounds ones wrap into the adjacent
                    // block found via the (cached) hashmap lookup. Returns
                    // nullptr when the neighbor block is not allocated.
                    auto GetVoxelAtP = [&] OPEN3D_DEVICE(
                                               int x_b, int y_b, int z_b,
                                               int x_v, int y_v, int z_v,
                                               core::addr_t block_addr,
                                               BlockCache& cache) -> voxel_t* {
                        int x_vn = (x_v + block_resolution) % block_resolution;
                        int y_vn = (y_v + block_resolution) % block_resolution;
                        int z_vn = (z_v + block_resolution) % block_resolution;

                        // Sign(x_v - x_vn) gives the -1/0/+1 block offset the
                        // raw voxel coordinate spilled into.
                        int dx_b = Sign(x_v - x_vn);
                        int dy_b = Sign(y_v - y_vn);
                        int dz_b = Sign(z_v - z_vn);

                        if (dx_b == 0 && dy_b == 0 && dz_b == 0) {
                            return voxel_block_buffer_indexer
                                    .GetDataPtr<voxel_t>(x_v, y_v, z_v,
                                                         block_addr);
                        } else {
                            Key key;
                            key.Set(0, x_b + dx_b);
                            key.Set(1, y_b + dy_b);
                            key.Set(2, z_b + dz_b);

                            // NOTE: this local block_addr intentionally
                            // shadows the parameter of the same name.
                            int block_addr = cache.Check(key.Get(0), key.Get(1),
                                                         key.Get(2));
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return nullptr;
                                block_addr = iter->second;
                                cache.Update(key.Get(0), key.Get(1), key.Get(2),
                                             block_addr);
                            }

                            return voxel_block_buffer_indexer
                                    .GetDataPtr<voxel_t>(x_vn, y_vn, z_vn,
                                                         block_addr);
                        }
                    };

                    // Fetch the voxel at parametric distance t along the ray
                    // (origin o, direction d), or nullptr if the containing
                    // block is not allocated.
                    auto GetVoxelAtT = [&] OPEN3D_DEVICE(
                                               float x_o, float y_o, float z_o,
                                               float x_d, float y_d, float z_d,
                                               float t,
                                               BlockCache& cache) -> voxel_t* {
                        float x_g = x_o + t * x_d;
                        float y_g = y_o + t * y_d;
                        float z_g = z_o + t * z_d;

                        // Block coordinate and look up
                        int x_b = static_cast<int>(floorf(x_g / block_size));
                        int y_b = static_cast<int>(floorf(y_g / block_size));
                        int z_b = static_cast<int>(floorf(z_g / block_size));

                        Key key;
                        key.Set(0, x_b);
                        key.Set(1, y_b);
                        key.Set(2, z_b);

                        int block_addr = cache.Check(x_b, y_b, z_b);
                        if (block_addr < 0) {
                            auto iter = hashmap_impl.find(key);
                            if (iter == hashmap_impl.end()) return nullptr;
                            block_addr = iter->second;
                            cache.Update(x_b, y_b, z_b, block_addr);
                        }

                        // Voxel coordinate and look up
                        int x_v = int((x_g - x_b * block_size) / voxel_size);
                        int y_v = int((y_g - y_b * block_size) / voxel_size);
                        int z_v = int((z_g - z_b * block_size) / voxel_size);

                        return voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                x_v, y_v, z_v, block_addr);
                    };

                    int64_t y = workload_idx / cols;
                    int64_t x = workload_idx % cols;

                    // Zero-initialize all requested outputs for this pixel so
                    // rays that miss leave well-defined values behind.
                    float *depth_ptr = nullptr, *vertex_ptr = nullptr,
                          *normal_ptr = nullptr, *color_ptr = nullptr;
                    if (enable_depth) {
                        depth_ptr = depth_map_indexer.GetDataPtr<float>(x, y);
                        *depth_ptr = 0;
                    }
                    if (enable_vertex) {
                        vertex_ptr = vertex_map_indexer.GetDataPtr<float>(x, y);
                        vertex_ptr[0] = 0;
                        vertex_ptr[1] = 0;
                        vertex_ptr[2] = 0;
                    }
                    if (enable_color) {
                        color_ptr = color_map_indexer.GetDataPtr<float>(x, y);
                        color_ptr[0] = 0;
                        color_ptr[1] = 0;
                        color_ptr[2] = 0;
                    }
                    if (enable_normal) {
                        normal_ptr = normal_map_indexer.GetDataPtr<float>(x, y);
                        normal_ptr[0] = 0;
                        normal_ptr[1] = 0;
                        normal_ptr[2] = 0;
                    }

                    // NOTE(review): the hard-coded /8 assumes the range map
                    // was generated with down_factor == 8 -- confirm callers.
                    const float* range =
                            range_map_indexer.GetDataPtr<float>(x / 8, y / 8);
                    float t = range[0];
                    const float t_max = range[1];
                    if (t >= t_max) return;

                    // Coordinates in camera and global
                    float x_c = 0, y_c = 0, z_c = 0;
                    float x_g = 0, y_g = 0, z_g = 0;
                    float x_o = 0, y_o = 0, z_o = 0;

                    // Iterative ray intersection check
                    float t_prev = t;

                    float tsdf_prev = -1.0f;
                    float tsdf = 1.0;
                    // NOTE: this local `w` (voxel weight) shadows the image
                    // width parameter `w`, which is not used past this point.
                    float w = 0.0;

                    // Camera origin
                    c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o,
                                                         &z_o);

                    // Direction
                    c2w_transform_indexer.Unproject(static_cast<float>(x),
                                                    static_cast<float>(y), 1.0f,
                                                    &x_c, &y_c, &z_c);
                    c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g,
                                                         &y_g, &z_g);
                    float x_d = (x_g - x_o);
                    float y_d = (y_g - y_o);
                    float z_d = (z_g - z_o);

                    BlockCache cache{0, 0, 0, -1};
                    bool surface_found = false;
                    // March: skip whole blocks when unallocated; otherwise
                    // step by the TSDF-scaled distance (at least one voxel).
                    while (t < t_max) {
                        voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d,
                                                         y_d, z_d, t, cache);

                        if (!voxel_ptr) {
                            t_prev = t;
                            t += block_size;
                        } else {
                            tsdf_prev = tsdf;
                            tsdf = voxel_ptr->GetTSDF();
                            w = voxel_ptr->GetWeight();
                            // Surface = positive-to-negative zero crossing
                            // with sufficient integration weight.
                            if (tsdf_prev > 0 && w >= weight_threshold &&
                                tsdf <= 0) {
                                surface_found = true;
                                break;
                            }
                            t_prev = t;
                            float delta = tsdf * sdf_trunc;
                            t += delta < voxel_size ? voxel_size : delta;
                        }
                    }

                    if (surface_found) {
                        // Linear interpolation of t at the zero crossing.
                        float t_intersect = (t * tsdf_prev - t_prev * tsdf) /
                                            (tsdf_prev - tsdf);
                        x_g = x_o + t_intersect * x_d;
                        y_g = y_o + t_intersect * y_d;
                        z_g = z_o + t_intersect * z_d;

                        // Trivial vertex assignment
                        if (enable_depth) {
                            *depth_ptr = t_intersect * depth_scale;
                        }
                        if (enable_vertex) {
                            w2c_transform_indexer.RigidTransform(
                                    x_g, y_g, z_g, vertex_ptr + 0,
                                    vertex_ptr + 1, vertex_ptr + 2);
                        }

                        // Trilinear interpolation
                        // TODO(wei): simplify the flow by splitting the
                        // functions given what is enabled
                        if (enable_color || enable_normal) {
                            int x_b =
                                    static_cast<int>(floorf(x_g / block_size));
                            int y_b =
                                    static_cast<int>(floorf(y_g / block_size));
                            int z_b =
                                    static_cast<int>(floorf(z_g / block_size));
                            float x_v = (x_g - float(x_b) * block_size) /
                                        voxel_size;
                            float y_v = (y_g - float(y_b) * block_size) /
                                        voxel_size;
                            float z_v = (z_g - float(z_b) * block_size) /
                                        voxel_size;

                            Key key;
                            key.Set(0, x_b);
                            key.Set(1, y_b);
                            key.Set(2, z_b);

                            int block_addr = cache.Check(x_b, y_b, z_b);
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return;
                                block_addr = iter->second;
                                cache.Update(x_b, y_b, z_b, block_addr);
                            }

                            int x_v_floor = static_cast<int>(floorf(x_v));
                            int y_v_floor = static_cast<int>(floorf(y_v));
                            int z_v_floor = static_cast<int>(floorf(z_v));

                            float ratio_x = x_v - float(x_v_floor);
                            float ratio_y = y_v - float(y_v_floor);
                            float ratio_z = z_v - float(z_v_floor);

                            float sum_weight_color = 0.0;
                            float sum_weight_normal = 0.0;
                            // Visit the 8 corner voxels of the containing
                            // cell; k's bits select the +1 offsets per axis.
                            for (int k = 0; k < 8; ++k) {
                                int dx_v = (k & 1) > 0 ? 1 : 0;
                                int dy_v = (k & 2) > 0 ? 1 : 0;
                                int dz_v = (k & 4) > 0 ? 1 : 0;
                                // Trilinear weight of this corner.
                                float ratio = (dx_v * (ratio_x) +
                                               (1 - dx_v) * (1 - ratio_x)) *
                                              (dy_v * (ratio_y) +
                                               (1 - dy_v) * (1 - ratio_y)) *
                                              (dz_v * (ratio_z) +
                                               (1 - dz_v) * (1 - ratio_z));

                                voxel_t* voxel_ptr_k = GetVoxelAtP(
                                        x_b, y_b, z_b, x_v_floor + dx_v,
                                        y_v_floor + dy_v, z_v_floor + dz_v,
                                        block_addr, cache);

                                if (enable_color && voxel_ptr_k &&
                                    voxel_ptr_k->GetWeight() > 0) {
                                    sum_weight_color += ratio;
                                    color_ptr[0] += ratio * voxel_ptr_k->GetR();
                                    color_ptr[1] += ratio * voxel_ptr_k->GetG();
                                    color_ptr[2] += ratio * voxel_ptr_k->GetB();
                                }

                                if (enable_normal) {
                                    // Central-difference TSDF gradient per
                                    // axis, accumulated with the corner's
                                    // trilinear weight.
                                    for (int dim = 0; dim < 3; ++dim) {
                                        voxel_t* voxel_ptr_k_plus = GetVoxelAtP(
                                                x_b, y_b, z_b,
                                                x_v_floor + dx_v + (dim == 0),
                                                y_v_floor + dy_v + (dim == 1),
                                                z_v_floor + dz_v + (dim == 2),
                                                block_addr, cache);
                                        voxel_t* voxel_ptr_k_minus =
                                                GetVoxelAtP(x_b, y_b, z_b,
                                                            x_v_floor + dx_v -
                                                                    (dim == 0),
                                                            y_v_floor + dy_v -
                                                                    (dim == 1),
                                                            z_v_floor + dz_v -
                                                                    (dim == 2),
                                                            block_addr, cache);

                                        bool valid = false;
                                        if (voxel_ptr_k_plus &&
                                            voxel_ptr_k_plus->GetWeight() > 0) {
                                            normal_ptr[dim] +=
                                                    ratio *
                                                    voxel_ptr_k_plus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }

                                        if (voxel_ptr_k_minus &&
                                            voxel_ptr_k_minus->GetWeight() >
                                                    0) {
                                            normal_ptr[dim] -=
                                                    ratio *
                                                    voxel_ptr_k_minus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }
                                        sum_weight_normal += valid ? ratio : 0;
                                    }
                                }  // if (enable_normal)
                            }      // loop over 8 neighbors

                            if (enable_color && sum_weight_color > 0) {
                                sum_weight_color *= 255.0;
                                color_ptr[0] /= sum_weight_color;
                                color_ptr[1] /= sum_weight_color;
                                color_ptr[2] /= sum_weight_color;
                            }
                            if (enable_normal && sum_weight_normal > 0) {
                                normal_ptr[0] /= sum_weight_normal;
                                normal_ptr[1] /= sum_weight_normal;
                                normal_ptr[2] /= sum_weight_normal;
                                float norm =
                                        sqrt(normal_ptr[0] * normal_ptr[0] +
                                             normal_ptr[1] * normal_ptr[1] +
                                             normal_ptr[2] * normal_ptr[2]);
                                // Normalize, then rotate into camera frame.
                                w2c_transform_indexer.Rotate(
                                        normal_ptr[0] / norm,
                                        normal_ptr[1] / norm,
                                        normal_ptr[2] / norm, normal_ptr + 0,
                                        normal_ptr + 1, normal_ptr + 2);
                            }
                        }  // if (color or normal)
                    }      // if (tsdf < 0)
                });
            });

#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}
} // namespace tsdf
} // namespace kernel
} // namespace geometry
} // namespace t
} // namespace open3d
|
GB_unaryop__minv_fp64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_uint64
// op(A') function: GB_tran__minv_fp64_uint64
// C type: double
// A type: uint64_t
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
// Input (A) entry type.
#define GB_ATYPE \
    uint64_t

// Output (C) entry type.
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

// Address of the pC-th entry of C.
#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse (reciprocal)
#define GB_OP(z, x) \
    z = 1./x ;

// casting: uint64_t -> double before applying the operator
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;   \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = minv ((double) Ax [p]) = 1./Ax [p] elementwise over anz
// entries, statically scheduled across nthreads OpenMP threads.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_unop__minv_fp64_uint64
(
    double *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((double) A'): transpose A while typecasting and applying the
// unary operator.  The actual loop body lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros above.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_tran__minv_fp64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
O9vertRec.c | #include <mpi.h>
#include "grid.h"
extern struct {
char *name;
int loc;
int dim;
union {
GVAL *restrict * restrict p2;
GVAL *restrict * restrict * restrict p3;
} data_pointer;
} *gv_temp;
extern struct {
char *name;
int loc;
int dim;
union {
GVAL *restrict * restrict p2;
GVAL *restrict * restrict * restrict p3;
} data_pointer;
} *gv_dvg;
extern struct {
char *name;
int loc;
int dim;
union {
GVAL *restrict * restrict p2;
GVAL *restrict * restrict * restrict p3;
} data_pointer;
} *gv_o9var;
extern GVAL *restrict * restrict tmpVR;
/* Vertical recurrence over the grid's height levels (generated code).
 *
 * The repeated expression ((g->cBlkCnt) + g->mpi_world_size - 1) /
 * g->mpi_world_size is ceil(cBlkCnt / world_size), the number of blocks
 * per MPI rank; min_block/max_block select this rank's local block range.
 *
 * Level 0:  tmpVR[0] caches dvg; o9var is copied from temp.
 * Level h>0: tmpVR[h] = dvg[h] + dvg[h-1];
 *            o9var[h] = 0.5*(temp[h] + temp[h-1]) + 0.05*tmpVR[h-1].
 *
 * NOTE(review): tmpVR is a single shared scratch array indexed only by
 * (height, cell), but the block loop runs under `omp parallel for` --
 * concurrent blocks appear to overwrite each other's tmpVR values before
 * they are read at the next height.  Confirm tmpVR is thread-private or
 * that at most one block is processed per rank. */
void O9vertRec(GRID * g)
{
    {
        /* This rank's [min_block, max_block) slice of the block range. */
        size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
        size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for
        for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
            /* Base case: height level 0. */
            const size_t height_index = (0);
            {
                for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
                    tmpVR[0][cell_index] = gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)];
                    gv_o9var->data_pointer.p3[(block_index)][(height_index)][(cell_index)] = gv_temp->data_pointer.p3[(block_index)][(height_index)][(cell_index)];
                }
            }
        }
    }
    {
        /* Same rank-local block range as above (generated duplication). */
        size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
        size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for
        for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
            /* Recurrence over the remaining height levels. */
            for (size_t height_index = (1); height_index < (g->height); height_index++) {
                for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
                    tmpVR[height_index][cell_index] = gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)] + gv_dvg->data_pointer.p3[(block_index)][((height_index) - 1)][(cell_index)];
                    gv_o9var->data_pointer.p3[(block_index)][(height_index)][(cell_index)] = 0.5 * (gv_temp->data_pointer.p3[(block_index)][(height_index)][(cell_index)] + gv_temp->data_pointer.p3[(block_index)][((height_index) - 1)][(cell_index)]) + 0.05 * tmpVR[height_index - 1][cell_index];
                }
            }
        }
    }
}
|
rsvd_test.c | #include "matrix_funs_intel_mkl.h"
/* parameter setting region */
// Input data file: a dense m x n matrix of doubles.
char filename[] = "exp3_2E3_2E3.dat";
int m = 2000; // row number of matrix
int n = 2000; // column number of matrix
// Test values of rank+oversampling (L), target rank (RANK), and block size
// (KSTEP); -1 is the end-of-array sentinel for each list.
int L[10] = {20,30,30, -1}; // rank parameter + oversampling
int RANK[10] = {16, 20, 24, -1}; // rank parameter
int KSTEP[10] = {10,-1}; // block size. -1 is an ending token of array
//Other settings
BOOL outputflag= false; //output U, S, V to disk file?
// NOTE: `round` shadows the C math-library function round().
int round = 3; //the times of running for each setting
int p4rSVDsp= 0; //0 or 1, only valid for rSVDsp alg.
/* end parameter setting */
// Base name of the input file (no directory/extension); set in main().
char expname[50];
// algorithm 1
/* Benchmark the basic randomized SVD (Algorithm 1) for every rank setting in
 * L[]/RANK[]; each setting runs `round` times and the average wall-clock time
 * is printed.  When `outputflag` is set, U, S, V from the first round are
 * written to "<expname>_rSVDbasic_k=<rank>_{U,V,S}.dat". */
void rSVDbasic_test()
{
    int i, j;
    int k, ki, p = 0;   /* p: number of power iterations (0 here) */
    int rank;
    FILE *fid_U, *fid_V, *fid_S;
    int timesi;
    printf("----Begin rSVDbasic test!\n");
    for (ki = 0; L[ki] > 0; ki++)
    {
        k = L[ki];          /* sketch size = rank + oversampling */
        rank = RANK[ki];    /* number of columns actually written out */
        printf("k = %d\n", rank);
        struct timeval start_timeval, end_timeval;
        double sum = 0;
        double time;
        for (timesi = 0; timesi < round; timesi++)
        {
            printf("\nround No. %d\n", timesi);
            mat *Q, *B;
            mat *U, *V;
            mat *Utrue;
            vec *E;
            U = matrix_new(k, k);
            V = matrix_new(n, k);
            E = vector_new(min(k, n));
            Utrue = matrix_new(m, k);
            gettimeofday(&start_timeval, NULL);
            rSVDbasic(filename, m, n, k, p, &Q, &B);
            svd_row_cut (B, U, E, V);
            matrix_matrix_mult_row(Q, U, Utrue);    /* Utrue = Q * U */
            gettimeofday(&end_timeval, NULL);
            if (outputflag && timesi == 0)
            {
                char U_buffer[100];
                char V_buffer[100];
                char S_buffer[100];
                sprintf(U_buffer,"%s_rSVDbasic_k=%d_U.dat", expname,rank);
                sprintf(V_buffer,"%s_rSVDbasic_k=%d_V.dat", expname,rank);
                sprintf(S_buffer,"%s_rSVDbasic_k=%d_S.dat", expname,rank);
                fid_U = fopen(U_buffer,"w");
                fid_V = fopen(V_buffer,"w");
                fid_S = fopen(S_buffer,"w");
                double *u = (double*)malloc(rank*m*sizeof(double));
                double *v = (double*)malloc(n*rank*sizeof(double));
                /* BUGFIX: the original wrapped `#pragma omp parallel for`
                 * inside another `#pragma omp parallel` region; the nested
                 * inner region made every outer-team thread execute the full
                 * copy loop (redundant, racing writes).  One combined
                 * parallel-for performs the intended work split. */
                #pragma omp parallel for private(j)
                for (i = 0; i < m; i++)
                {
                    /* Keep only the leading `rank` columns of Utrue. */
                    for (j = 0; j < rank; j++)
                    {
                        u[i*rank+j] = Utrue->d[i*Utrue->ncols + j];
                    }
                }
                #pragma omp parallel for private(j)
                for (i = 0; i < n; i++)
                {
                    for (j = 0; j < rank; j++)
                    {
                        v[i*rank+j] = V->d[i*V->ncols +j];
                    }
                }
                fwrite(u, sizeof(double), m*rank, fid_U);
                fwrite(v, sizeof(double), n*rank, fid_V);
                fwrite(E->d, sizeof(double), E->nrows, fid_S);
                fclose(fid_U);
                fclose(fid_V);
                fclose(fid_S);
                free(u);
                free(v);
            }
            matrix_delete(U);
            matrix_delete(V);
            vector_delete(E);
            matrix_delete(Q);
            matrix_delete(B);
            matrix_delete(Utrue);
            sum += get_seconds_frac(start_timeval,end_timeval);
        }
        time = sum / round;
        printf("\nAverage runtime of rSVDbasic alg.: %f second\n\n", time);
    }
}
// algorithm 4
/* Benchmark the single-pass randomized SVD (Algorithm 4) over every
 * (rank, kstep) combination from L[]/RANK[] and KSTEP[]; each combination
 * runs `round` times and the average wall-clock time is printed.  When
 * `outputflag` is set, the first round's factors are written to
 * "<expname>_rSVDsp_k=<rank>_{U,V,S}.dat". */
void rSVDsp_test()
{
    int i, j;
    int p, k, kstep, ki, kstepi;
    int rank;
    FILE *fid_U, *fid_V, *fid_S;
    int timesi;
    p = p4rSVDsp;   /* 0 default, 1 with an extra pass. */
    printf("----Begin rSVDsp test!\n");
    for (ki = 0; L[ki] > 0; ki++)
    {
        k = L[ki];          /* sketch size = rank + oversampling */
        rank = RANK[ki];    /* number of columns actually written out */
        for (kstepi = 0; KSTEP[kstepi] > 0; kstepi++)
        {
            kstep = KSTEP[kstepi];
            printf("k = %d, kstep = %d, p=%d\n", rank, kstep, p);
            struct timeval start_timeval, end_timeval;
            double sum = 0;
            double time;
            for (timesi = 0; timesi < round; timesi++)
            {
                printf("\nround No. %d\n", timesi);
                mat *Q, *B;
                mat *U, *V;
                mat *Utrue;
                vec *E;
                U = matrix_new(k, k);
                V = matrix_new(n, k);
                E = vector_new(min(k, n));
                Utrue = matrix_new(m, k);
                gettimeofday(&start_timeval, NULL);
                rSVDsp(filename, m, n, k, kstep, p, &Q, &B);
                svd_row_cut (B, U, E, V);
                matrix_matrix_mult_row(Q, U, Utrue);    /* Utrue = Q * U */
                gettimeofday(&end_timeval, NULL);
                if (outputflag && timesi == 0)
                {
                    char U_buffer[100];
                    char V_buffer[100];
                    char S_buffer[100];
                    sprintf(U_buffer,"%s_rSVDsp_k=%d_U.dat", expname,rank);
                    sprintf(V_buffer,"%s_rSVDsp_k=%d_V.dat", expname,rank);
                    sprintf(S_buffer,"%s_rSVDsp_k=%d_S.dat", expname,rank);
                    fid_U = fopen(U_buffer,"w");
                    fid_V = fopen(V_buffer,"w");
                    fid_S = fopen(S_buffer,"w");
                    double *u = (double*)malloc(rank*m*sizeof(double));
                    double *v = (double*)malloc(n*rank*sizeof(double));
                    /* BUGFIX: replaced nested `parallel` + `parallel for`
                     * (every outer thread re-ran the whole loop, racing on
                     * the same writes) with a single combined parallel-for. */
                    #pragma omp parallel for private(j)
                    for (i = 0; i < m; i++)
                    {
                        /* Keep only the leading `rank` columns of Utrue. */
                        for (j = 0; j < rank; j++)
                        {
                            u[i*rank+j] = Utrue->d[i*Utrue->ncols + j];
                        }
                    }
                    #pragma omp parallel for private(j)
                    for (i = 0; i < n; i++)
                    {
                        for (j = 0; j < rank; j++)
                        {
                            v[i*rank+j] = V->d[i*V->ncols +j];
                        }
                    }
                    fwrite(u, sizeof(double), m*rank, fid_U);
                    fwrite(v, sizeof(double), n*rank, fid_V);
                    fwrite(E->d, sizeof(double), E->nrows, fid_S);
                    fclose(fid_U);
                    fclose(fid_V);
                    fclose(fid_S);
                    free(u);
                    free(v);
                }
                matrix_delete(U);
                matrix_delete(V);
                vector_delete(E);
                matrix_delete(Q);
                matrix_delete(B);
                matrix_delete(Utrue);
                sum += get_seconds_frac(start_timeval,end_timeval);
            }
            time = sum / round;
            printf("\nAverage runtime of rSVDsp alg.: %f second\n\n", time);
        }
    }
}
//algorithm 2
/* Benchmark the randomized SVD with extra subspace power iterations
 * (Algorithm 2, fixed p = 2) for every rank setting in L[]/RANK[]; each
 * setting runs `round` times and the average wall-clock time is printed.
 * When `outputflag` is set, the first round's factors are written to
 * "<expname>_rSVD_exSP_k=<rank>_{U,V,S}.dat". */
void rSVD_exSP_test()
{
    int i, j;
    int p = 2, k, ki;   /* p: number of power iterations */
    int rank;
    FILE *fid_U, *fid_V, *fid_S;
    int timesi;
    printf("----Begin rSVD_exSP test!\n");
    for (ki = 0; L[ki] > 0; ki++)
    {
        k = L[ki];          /* sketch size = rank + oversampling */
        rank = RANK[ki];    /* number of columns actually written out */
        printf("k = %d\n", rank);
        struct timeval start_timeval, end_timeval;
        double sum = 0;
        double time;
        for (timesi = 0; timesi < round; timesi++)
        {
            printf("\nround No. %d\n", timesi);
            mat *Q, *B, *_Q;
            mat *U, *V;
            mat *Utrue;
            mat *H;
            vec *E;
            U = matrix_new(k, k);
            V = matrix_new(k, k);
            E = vector_new(k);
            Utrue = matrix_new(m, k);
            H = matrix_new(n, k);
            gettimeofday(&start_timeval, NULL);
            rSVD_exSP(filename, m, n, k, p, &Q, &_Q, &B);
            svd_row_cut (B, U, E, V);
            matrix_matrix_mult_row(Q, U, Utrue);    /* Utrue = Q * U  */
            matrix_matrix_mult_row(_Q, V, H);       /* H     = _Q * V */
            gettimeofday(&end_timeval, NULL);
            if (outputflag && timesi == 0)
            {
                char U_buffer[100];
                char V_buffer[100];
                char S_buffer[100];
                sprintf(U_buffer,"%s_rSVD_exSP_k=%d_U.dat", expname,rank);
                sprintf(V_buffer,"%s_rSVD_exSP_k=%d_V.dat", expname,rank);
                sprintf(S_buffer,"%s_rSVD_exSP_k=%d_S.dat", expname,rank);
                fid_U = fopen(U_buffer,"w");
                fid_V = fopen(V_buffer,"w");
                fid_S = fopen(S_buffer,"w");
                double *u = (double*)malloc(rank*m*sizeof(double));
                double *v = (double*)malloc(n*rank*sizeof(double));
                /* BUGFIX: replaced nested `parallel` + `parallel for`
                 * (every outer thread re-ran the whole loop, racing on the
                 * same writes) with a single combined parallel-for. */
                #pragma omp parallel for private(j)
                for (i = 0; i < m; i++)
                {
                    /* Keep only the leading `rank` columns of Utrue. */
                    for (j = 0; j < rank; j++)
                    {
                        u[i*rank+j] = Utrue->d[i*Utrue->ncols + j];
                    }
                }
                #pragma omp parallel for private(j)
                for (i = 0; i < n; i++)
                {
                    for (j = 0; j < rank; j++)
                    {
                        v[i*rank+j] = H->d[i*H->ncols +j];
                    }
                }
                fwrite(u, sizeof(double), m*rank, fid_U);
                fwrite(v, sizeof(double), n*rank, fid_V);
                fwrite(E->d, sizeof(double), E->nrows, fid_S);
                fclose(fid_U);
                fclose(fid_V);
                fclose(fid_S);
                free(u);
                free(v);
            }
            matrix_delete(U);
            matrix_delete(V);
            vector_delete(E);
            matrix_delete(Q);
            matrix_delete(B);
            matrix_delete(_Q);
            matrix_delete(Utrue);
            matrix_delete(H);
            sum += get_seconds_frac(start_timeval,end_timeval);
        }
        time = sum / round;
        printf("\nAverage runtime of rSVD_exSP alg.: %f second\n\n", time);
    }
}
/*
 * Entry point.  Derives the experiment name `expname` from the global input
 * path `filename` (the basename with everything from the first '.' stripped)
 * and then runs the three SVD benchmark drivers.
 */
int main() {
    char *ptr1, *ptr2;
    int len, i;
    ptr1 = strrchr(filename, '/');   /* last path separator, if any     */
    ptr2 = strchr(filename, '.');    /* first '.' starts the extension  */
    if (ptr1 == NULL) {
        /* No directory component: copy up to the first '.'.  Also stop at
         * the terminator -- the original loop ran past the end of the
         * string when the name contained no '.'. */
        i = 0;
        while (filename[i] != '\0' && filename[i] != '.') {
            expname[i] = filename[i];
            i++;
        }
        expname[i] = '\0';
    }
    else {
        /* Basename between the last '/' and the first '.' after it.  When
         * the only '.' lies inside a directory name (ptr2 < ptr1) or there
         * is no '.' at all, keep the whole basename; the original computed
         * a negative/undefined length in those cases. */
        if (ptr2 == NULL || ptr2 < ptr1)
            len = (int)strlen(ptr1 + 1);
        else
            len = (int)(ptr2 - ptr1 - 1);
        for (i = 0; i < len; i++) {
            expname[i] = ptr1[i + 1];
        }
        expname[i] = '\0';
    }
    printf("***************************************************************\n");
    printf("Test single-pass SVD algorithm (rSVDsp) and other counterparts.\n");
    printf("***************************************************************\n\n");
    printf("Input matrix file: %s\n\n", filename);
    rSVDbasic_test();
    rSVD_exSP_test();
    rSVDsp_test();
    return 0;
}
|
convolution_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convolution_transform_kernel_pack4to1_neon(const Mat& weight_data, Mat& weight_data_pack4to1, int num_input, int num_output, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// src = kw-kh-inch-outch
// dst = 4a-kw-kh-inch/4a-outch
Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
weight_data_pack4to1.create(maxk, num_input / 4, num_output, (size_t)4 * 4, 4);
for (int q = 0; q < num_output; q++)
{
const Mat k0 = weight_data_r2.channel(q);
Mat g0 = weight_data_pack4to1.channel(q);
for (int p = 0; p + 3 < num_input; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
float* g00 = g0.row(p / 4);
for (int k = 0; k < maxk; k++)
{
g00[0] = k00[k];
g00[1] = k01[k];
g00[2] = k02[k];
g00[3] = k03[k];
g00 += 4;
}
}
}
}
// Convolution whose input is packed 4 channels per element (pack4) and whose
// output is unpacked (pack1).  For every output pixel it accumulates a NEON
// 4-lane dot product over all packed input channels and kernel taps, then
// applies the optional bias and the requested activation.
static void convolution_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // Precompute, for each of the maxk taps, its element offset inside a
    // dilated kernel window laid over a row-major input plane of width w.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w; // jump to the next kernel row
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data; // empty Mat converts to NULL -> no bias

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                // Weights for output channel p: channels x maxk taps, 4 floats per tap.
                const float* kptr = (const float*)weight_data_pack4to1 + maxk * channels * p * 4;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // *4 strides everywhere: each pack4 element holds 4 floats.
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 4;

                    for (int k = 0; k < maxk; k++) // 29.23
                    {
                        float32x4_t _val = vld1q_f32(sptr + space_ofs[k] * 4);
                        float32x4_t _w = vld1q_f32(kptr);
                        float32x4_t _s4 = vmulq_f32(_val, _w);
#if __aarch64__
                        sum += vaddvq_f32(_s4); // dot
#else
                        // armv7 has no horizontal add: pairwise-add the two halves.
                        float32x2_t _ss = vadd_f32(vget_low_f32(_s4), vget_high_f32(_s4));
                        _ss = vpadd_f32(_ss, _ss);
                        sum += vget_lane_f32(_ss, 0);
#endif
                        kptr += 4;
                    }
                }

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
|
statsutils.h | #ifndef STATS_UTILS_H
#define STATS_UTILS_H
#include "common.h"
#ifndef MKL_BLAS
#define MKL_BLAS MKL_DOMAIN_BLAS
#endif
#define EIGEN_USE_MKL_ALL
#include <eigen3/Eigen/Dense>
#include <eigen3/Eigen/Geometry>
#include <eigen3/Eigen/LU>
#include <opencv2/opencv.hpp>
namespace StatsUtils {
// Sample covariance of the columns of `mat` (rows are observations),
// normalized by (nrows - 1).
static MatrixXd cov(const MatrixXd& mat) {
  const double n = static_cast<double>(mat.rows());
  MatrixXd centered = mat.rowwise() - mat.colwise().mean();
  return (centered.adjoint() * centered) / (n - 1.0);
}
// Pearson correlation matrix derived from the sample covariance:
// r(i,j) = cov(i,j) / sqrt(cov(i,i) * cov(j,j)).
static MatrixXd corr(const MatrixXd& mat) {
  const MatrixXd c = cov(mat);
  const int n = static_cast<int>(c.rows());
  MatrixXd r = MatrixXd::Zero(n, n);
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      r(i, j) = r(j, i) = c(i, j) / sqrt(c(i, i) * c(j, j));
    }
  }
  return r;
}
// Symmetric matrix of pairwise Euclidean distances between the rows of `mat`.
static MatrixXd dist(const MatrixXd& mat) {
  const int n = static_cast<int>(mat.rows());
  MatrixXd D = MatrixXd::Zero(n, n);
  for (int i = 0; i < n; ++i) {
    for (int j = i + 1; j < n; ++j) {
      const double dij = (mat.row(i) - mat.row(j)).norm();
      D(i, j) = dij;
      D(j, i) = dij;
    }
  }
  return D;
}
// Mean of `mat` along the given dimension:
//   dim == 1 -> column-wise mean (vector of length mat.cols())
//   dim == 2 -> row-wise mean    (vector of length mat.rows())
// Any other `dim` falls back to the column-wise mean.  The original switch
// had no default and fell off the end of a non-void function (undefined
// behavior) for dim outside {1, 2}.
static VectorXd mean(const MatrixXd& mat, int dim = 1) {
  switch(dim) {
  case 2: {
    // row wise mean
    VectorXd m = VectorXd::Zero(mat.rows());
    for(int i=0;i<mat.cols();++i) {
      m += mat.col(i);
    }
    m = m / static_cast<double>(mat.cols());
    return m;
  }
  case 1:
  default: {
    // column wise mean (also the fallback for invalid dim)
    VectorXd m = VectorXd::Zero(mat.cols());
    for(int i=0;i<mat.rows();++i) {
      m += mat.row(i).transpose();
    }
    m = m / static_cast<double>(mat.rows());
    return m;
  }
  }
}
// Min-max normalization of all entries of `mat` into [0, 1].
// A near-constant matrix is returned unchanged (avoids division by ~0).
static MatrixXd normalize(const MatrixXd& mat) {
  const double lo = mat.minCoeff();
  const double hi = mat.maxCoeff();
  const double span = hi - lo;
  const double DIFF_THRESHOLD = 1e-16;
  if(span <= DIFF_THRESHOLD) {
    cerr << "Near-zero matrix. Not normalized." << endl;
    return mat;
  }
  MatrixXd out(mat.rows(), mat.cols());
  for(int r=0;r<mat.rows();++r) {
    for(int c=0;c<mat.cols();++c) {
      out(r, c) = (mat(r, c) - lo) / span;
    }
  }
  return out;
}
// Random vector of length N with entries drawn uniformly (via rand()) from
// [-range, range].
static VectorXd randvec(int N, double range) {
  VectorXd v(N);
  for(int i=0;i<N;++i) {
    const double unit = rand() / static_cast<double>(RAND_MAX);  // [0, 1]
    v[i] = (unit - 0.5) * 2.0 * range;
  }
  return v;
}
// Add uniform noise in [-range, range] to `v`.  When a covariance matrix is
// supplied, the noise of component i is additionally scaled by cov_mat(i, i).
static VectorXd perturb(const VectorXd& v, double range, const MatrixXd& cov_mat = MatrixXd()) {
  cout << "perturbation of identity weights ..." << endl;
  const int N = v.rows();
  if(cov_mat.rows()==0 || cov_mat.cols()==0) {
    cout << "empty cov matrix..." << endl;
    return v + randvec(N, range);
  }
  VectorXd noise = randvec(N, range);
  for(int i=0;i<N;++i) {
    noise[i] *= cov_mat(i, i);
  }
  return v + noise;
}
// TODO(stub): not implemented.  Intended algorithm (per the notes below):
// run k-means to separate a "good" cluster from the rest, compute the good
// cluster's centroid, and return the k samples closest to it.  Currently
// always returns an empty set.
static vector<int> FindConsistentSet_kMeans(const MatrixXd& x, int k) {
  // Run a k-means to separate the good set and others
  // Compute the centroid of the good set
  // Pick k closest as the consistent set
  return vector<int>();
}
// Mean-shift-based selection of a "consistent" subset of the column samples
// of x (each column is one ndims-dimensional sample).
//   h            : Gaussian kernel bandwidth.
//   k            : number of sample indices to return.
//   centroid_out : optional; receives the dominant mode when non-null.
// Returns the indices of the k samples whose shifted positions lie closest
// to the highest-density mode.
static vector<int> FindConsistentSet(const MatrixXd& x, double h, int k,
                                     VectorXd* centroid_out=nullptr) {
  // Meanshift until converged
  int ndims = x.rows(), nsamples = x.cols();
  MatrixXd m(ndims, nsamples);   // mean-shift update for every sample
  MatrixXd y = x;                // current positions, initialized at the samples
  const double th = 1e-6;
  const int max_iters = 100;
  bool done = false;
  int iters = 0;
  double ms = 0;
  while(!done && iters < max_iters) {
    for(int i=0;i<nsamples;++i) {
      double gsum = 0;
      VectorXd yi = VectorXd::Zero(ndims);
      // Gaussian-weighted average of all other samples around y.col(i).
      for(int j=0;j<nsamples;++j) {
        if(j==i) continue;
        else {
          VectorXd dj = (y.col(i) - x.col(j))/h;
          double gj = exp(-dj.transpose() * dj);
          gsum += gj;
          yi += gj * x.col(j);
        }
      }
      m.col(i) = yi / (gsum + 1e-8);  // small epsilon guards gsum == 0
    }
    // NOTE(review): `ms` is the largest coefficient of the new *positions*,
    // not of the shift (m - y); the loop therefore "converges" only when all
    // coordinates become tiny.  Looks like it was meant to measure the shift
    // magnitude -- confirm the intended convergence criterion.
    ms = m.maxCoeff();
    if(ms < th) {
      cout << "ms = " << ms << endl;
      done = true;
    } else {
      y = m;
      ++iters;
      cout << "iteration " << iters << ": " << ms << endl;
    }
  }
  // Find the highest density cluster, compute its centroid
  // Density estimate at each shifted point (unit-bandwidth Gaussian).
  vector<double> d(nsamples, 0);
  for(int i=0;i<nsamples;++i) {
    for(int j=0;j<nsamples;++j) {
      d[i] += exp(-(y.col(i) - y.col(j)).squaredNorm());
    }
  }
  double max_d = 0;
  int max_idx = -1;
  for(int i=0;i<nsamples;++i) {
    if(d[i] > max_d) {
      max_d = d[i];
      max_idx = i;
    }
  }
  VectorXd centroid = y.col(max_idx);
  if(centroid_out != nullptr) {
    // Write the centroid to the output
    *centroid_out = centroid;
  }
  // Compute the distance between the centroid and each input point
  vector<pair<int, double>> dists(nsamples);
  for(int i=0;i<nsamples;++i) {
    dists[i] = make_pair(i, (y.col(i) - centroid).norm());
  }
  std::sort(dists.begin(), dists.end(),
            [](const pair<int, double>& a, const pair<int, double>& b) {
              return a.second < b.second;
            });
  // Pick the k nearest points as the consistent set
  vector<int> consistent_set;
  for(int i=0;i<k;++i) {
    consistent_set.push_back(dists[i].first);
  }
  return consistent_set;
}
// Mean-shift filtering/segmentation of a CV_64FC3 image.
//   hs : spatial window radius (a double used as an integer loop bound)
//   hr : range (color) bandwidth of the Gaussian kernel
//   th : convergence threshold on the mean per-channel change per iteration
// Returns the filtered image; values are rounded to integers every iteration
// so the weight-table lookups stay on integer squared differences.
static cv::Mat MeanShiftSegmentation(const cv::Mat& x, double hs, double hr, double th) {
  cout << "Mean shift segmentation." << endl;
  int height = x.rows, width = x.cols;
  bool done = false;
  int iters = 0;
  // weight map for color space
  // Precomputed Gaussian weights indexed by squared channel difference.
  // NOTE(review): the table is later indexed with diff*diff + 1; for a
  // maximal difference of 255 that index is 255*255 + 1, which is one past
  // the last valid element of this vector (size 255*255 + 1, so max index
  // 255*255).  Looks like an off-by-one from a 1-based-language port --
  // confirm, and either enlarge the table or drop the "+ 1" below.
  vector<double> weight_map(255*255+1, 0);
  for(int i=0;i<255*255+1;++i) {
    weight_map[i] = exp(-i/(hr*hr));
  }
  const int max_iters = 16;
  cv::Mat y = x.clone();
  while(!done) {
    ++iters;
    cv::Mat weightAccum(height, width, CV_64F, 0.0);
    cv::Mat yAccum(height, width, CV_64FC3, 0.0);
    cv::Mat xThis(height, width, CV_64FC3);
    // Slide the spatial window: (i, j) is the offset of the contributing
    // neighbour; borders wrap around (toroidal topology).
    for(int i=-hs;i<=hs;++i) {
      for(int j=-hs;j<=hs;++j) {
        if(i==0 && j==0) continue;
        double spatialKernel = 1.0;  // flat (uniform) spatial kernel
        #pragma omp parallel for
        for(int r=0;r<height;++r) {
          int r0 = r + i;
          if(r0<0) r0 += height;
          if(r0>=height) r0 -= height;
          for(int c=0;c<width;++c) {
            int c0 = c + j;
            if(c0<0) c0 += width;
            if(c0>=width) c0 -= width;
            xThis.at<cv::Vec3d>(r, c) = x.at<cv::Vec3d>(r0, c0);
          }
        }
        // Squared per-channel difference between the current estimate and
        // the shifted neighbour image (each channel gets "+ 1", see NOTE above).
        cv::Mat xDiffSq = y - xThis;
        #pragma omp parallel for
        for(int r=0;r<height;++r) {
          for(int c=0;c<width;++c) {
            cv::Vec3d pix = xDiffSq.at<cv::Vec3d>(r, c);
            xDiffSq.at<cv::Vec3d>(r, c) = cv::Vec3d(pix[0]*pix[0] + 1, pix[1]*pix[1] + 1, pix[2]*pix[2] + 1);
          }
        }
        // Range kernel: product of the three per-channel Gaussian weights.
        // pix[*] is a double used as a vector index (implicitly truncated).
        cv::Mat intensityKernel(height, width, CV_64F);
        #pragma omp parallel for
        for(int r=0;r<height;++r) {
          for(int c=0;c<width;++c) {
            cv::Vec3d pix = xDiffSq.at<cv::Vec3d>(r, c);
            intensityKernel.at<double>(r, c) = weight_map[pix[0]] * weight_map[pix[1]] * weight_map[pix[2]];
          }
        }
        cv::Mat weightThis = intensityKernel * spatialKernel;
        weightAccum += weightThis;
        // Accumulate the weighted neighbour colors.
        #pragma omp parallel for
        for(int r=0;r<height;++r) {
          for(int c=0;c<width;++c) {
            cv::Vec3d pix = xThis.at<cv::Vec3d>(r, c);
            yAccum.at<cv::Vec3d>(r, c) += pix * weightThis.at<double>(r, c);
          }
        }
      }
    }
    // New estimate = weighted color sum / total weight (epsilon avoids /0).
    cv::Mat yThis = yAccum.clone();
    #pragma omp parallel for
    for(int r=0;r<height;++r) {
      for(int c=0;c<width;++c) {
        yThis.at<cv::Vec3d>(r, c) /= (weightAccum.at<double>(r, c) + 1e-16);
      }
    }
    // Mean absolute per-channel change (after rounding) -- convergence measure.
    double yMS = 0;
    //#pragma omp parallel for
    for(int r=0;r<height;++r) {
      for(int c=0;c<width;++c) {
        cv::Vec3d p1 = yThis.at<cv::Vec3d>(r, c);
        cv::Vec3d p0 = y.at<cv::Vec3d>(r, c);
        yMS += fabs(round(p1[0]) - round(p0[0]));
        yMS += fabs(round(p1[1]) - round(p0[1]));
        yMS += fabs(round(p1[2]) - round(p0[2]));
      }
    }
    yMS /= (height*width*3);
    cout << "iteration " << iters << ": " << yMS << endl;
    y = yThis.clone();
    // Quantize the estimate to integer channel values for the next round.
    #pragma omp parallel for
    for(int r=0;r<height;++r) {
      for(int c=0;c<width;++c) {
        cv::Vec3d pix = yThis.at<cv::Vec3d>(r, c);
        y.at<cv::Vec3d>(r, c) = cv::Vec3d(round(pix[0]), round(pix[1]), round(pix[2]));
      }
    }
    if(yMS<=th || iters > max_iters) {
      done = true;
    }
  }
  return y;
}
}
#endif
|
bfs.c | /*
* Imports
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <mpi.h>
#include <omp.h>
#include "filing.c"
#include "queue.c"
/* Constants */
#define PRINT_VECS 1 // flag so we can turn off printing when N is large
#define MAX_RAND 100 // max value of elements generated for array
/* Prototypes */
void init_vec(int *vec, int len);
void print_vec(const char *label, int *vec, int len);
/* Functions */
// Fills `vec` with `len` pseudo-random integers in the range [0, MAX_RAND).
void init_vec(int *vec, int len)
{
    for (int i = 0; i < len; i++)
        vec[i] = rand() % MAX_RAND;
}
// Prints `vec` (space-separated, trailing blank line) to stdout;
// compiled out entirely when PRINT_VECS is 0.
void print_vec(const char *label, int *vec, int len)
{
#if PRINT_VECS
    printf("%s", label);
    for (int i = 0; i < len; i++)
        printf("%d ", vec[i]);
    printf("\n\n");
#endif
}
// Classic queue-based BFS over an n x n adjacency matrix, starting at
// `source`.  Used purely as a timing baseline (produces no output).
void bfs_sequential(int** graph, int source, int n)
{
    int visited[n];
    for (int i = 0; i < n; i++) {
        visited[i] = 0;
    }

    struct Queue* q = createQueue();
    visited[source] = 1;
    enqueue(q, source);

    while (!isEmpty(q)) {
        int u = dequeue(q);
        for (int v = 0; v < n; v++) {
            if (graph[u][v] == 1 && visited[v] == 0) {
                visited[v] = 1;
                enqueue(q, v);
            }
        }
    }
}
// Level-synchronous top-down BFS: expand every frontier vertex, recording
// parents; `next` collects the following level.  Timing baseline only.
void bfs_sequential_top_down(int** graph, int source, int n)
{
    int parent[n];
    for (int i = 0; i < n; i++) {
        parent[i] = -1;             /* -1 marks "not yet discovered" */
    }
    parent[source] = source;

    struct Queue* frontier = createQueue();
    struct Queue* next = NULL;
    enqueue(frontier, source);

    while (frontier != NULL)
    {
        while (!isEmpty(frontier)) {
            int u = dequeue(frontier);
            for (int v = 0; v < n; v++) {
                if (graph[u][v] != 1)
                    continue;
                /* Lazily create the next-level queue on the first edge seen. */
                if (next == NULL)
                    next = createQueue();
                if (parent[v] == -1) {
                    parent[v] = u;
                    enqueue(next, v);
                }
            }
        }
        frontier = next;
        next = NULL;
    }
}
// Level-synchronous bottom-up BFS: every undiscovered vertex scans its
// neighbours for one that lies in the current frontier.  Timing baseline only.
void bfs_sequential_bottom_up(int** graph, int source, int n)
{
    int parent[n];
    for (int i = 0; i < n; i++) {
        parent[i] = -1;             /* -1 marks "not yet discovered" */
    }
    parent[source] = source;

    struct Queue* frontier = createQueue();
    struct Queue* next = NULL;
    enqueue(frontier, source);

    while (frontier != NULL)
    {
        for (int u = 0; u < n; u++)
        {
            if (parent[u] != -1)
                continue;
            for (int v = 0; v < n; v++)
            {
                if (graph[u][v] == 1 && isVInQueue(frontier, v) == 1)
                {
                    if (next == NULL)
                        next = createQueue();
                    parent[u] = v;
                    enqueue(next, u);
                    break;          /* one frontier neighbour is enough */
                }
            }
        }
        frontier = next;
        next = NULL;
    }
}
// Distributed BFS benchmark: a 1D-partitioned MPI BFS with OpenMP inner
// loops, followed by three sequential reference BFS variants, all timed and
// reported from rank 0.
int main(int argc, char *argv[])
{
    // Declare process-related vars
    // and initialize MPI
    int rank;
    int num_procs;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank); //grab this process's rank
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs); //grab the total num of processes
    int** graph;
    double start_time;
    double stop_time;
    char* file_name = "Sparse500.txt";
    int n = get_n(file_name);   // vertex count, read from the input file
    // init graph nxn matrix -- zeroed adjacency matrix, replicated on every rank
    graph = (int **)malloc(n * sizeof(int *));
    for (int i = 0; i < n; i++)
    {
        graph[i] = (int *)malloc(n * sizeof(int));
        for (int j = 0; j < n; j++)
        {
            graph[i][j] = 0;
        }
    }
    load(graph, file_name);
    if(!rank)
    {
        // Rank 0 reports the problem statistics.
        int edges = 0;
        for (int i = 0; i < n; i++)
        {
            for (int j = 0; j < n; j++)
            {
                if(graph[i][j] == 1)
                    edges++;
            }
        }
        printf("Edges: %d\n", edges);
        printf("Procs: %d\n", num_procs);
        printf("File: %s\n", file_name);
    }
    srand(time(NULL));
    start_time = MPI_Wtime();
    // 1D block partition of the vertices: vertex i belongs to rank owner[i].
    int chunk_size = n / num_procs;
    int owner[n];
    // assign first chunk to proc 0
    for(int i = 0; i < chunk_size; i++)
    {
        owner[i] = 0;
    }
    // assign the other chunks to their respective processors
    // (the last rank also absorbs the remainder when n % num_procs != 0)
    int curr_owner = 0;
    for(int i = chunk_size; i < n; i++)
    {
        if(i % chunk_size == 0 && curr_owner < num_procs - 1)
            curr_owner++;
        owner[i] = curr_owner;
    }
    int source = 0;
    int visited[n];
    // init visited array for each proc
    for(int i = 0; i < n; i++)
        visited[i] = 0;
    // init queue for each proc
    struct Queue* frontier = createQueue();
    // if proc 0, seed the frontier with the root's neighbours and mark the root
    if(rank == 0)
    {
        for(int v = 0; v < n; v++)
        {
            if(graph[source][v] == 1)
            {
                enqueue(frontier, v);
            }
        }
        visited[source] = 1;
    }
    // init 2D send buffer for each proc: sendBuf[p][v] == 1 means
    // "vertex v, owned by rank p, belongs to the next frontier".
    int **sendBuf = (int **)malloc(num_procs * sizeof(int *));
    for (int i = 0; i < num_procs; i++)
    {
        sendBuf[i] = (int *)malloc(n * sizeof(int));
        for(int j = 0; j < n; j++)
            sendBuf[i][j] = 0;
    }
    // init 1D recv buffer for each proc
    int* recvBuf = (int *)malloc(n * sizeof(int));
    int sendTo[n];
    for(int i = 0; i < n; i++)
        sendTo[i] = 0;
    // BFS
    // Each iteration handles one frontier: locally-owned vertices are
    // expanded here, remote ones are shipped to their owning rank.
    while(frontier != NULL)
    {
        struct Queue* remote = createQueue();
        struct Queue* local = createQueue();
        // set local and remote vertices
        assignLocalAndRemoteVertices(local, remote, frontier, rank, owner);
        // NOTE(review): free() releases only the queue header; any nodes
        // still linked to it leak (no queue-destroy routine is called).
        free(frontier);
        frontier = NULL;
        // add vertices from remote to send buf
        while(!isEmpty(remote))
        {
            int v = dequeue(remote);
            sendBuf[owner[v]][v] = 1;
            sendTo[owner[v]] = 1;
        }
        // send respective remote vertices
        MPI_Request request;
        for(int proc = 0; proc < num_procs; proc++)
        {
            if(proc != rank && sendTo[proc] == 1)
            {
                MPI_Isend(sendBuf[proc], n, MPI_INT, proc, 1, MPI_COMM_WORLD, &request);
                sendTo[proc] = 0;
            }
        }
        // process local vertices
        while(!isEmpty(local))
        {
            int v = dequeue(local);
            if(visited[v] == 0)
            {
                int u;
                #pragma omp parallel for private(u)
                for(u = 0; u < n; u++)
                    if(graph[v][u] == 1)
                    {
                        // The queue is not thread-safe; serialize enqueues.
                        #pragma omp critical
                        {
                            if(frontier == NULL)
                            {
                                frontier = createQueue();
                            }
                            enqueue(frontier, u);
                        }
                    }
                visited[v] = 1;
            }
        }
        // recv vertices from remote and enqueue neighbours in frontier
        // NOTE(review): exactly one blocking MPI_Recv is posted per level,
        // regardless of how many ranks sent to us this round (possibly
        // none) -- this can drop messages or deadlock; confirm the intended
        // synchronization protocol.
        MPI_Status status;
        MPI_Recv(recvBuf, n, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        int j;
        #pragma omp parallel for private(j)
        for(j = 0; j < n; j++)
        {
            if(recvBuf[j] == 1)
            {
                if(visited[j] == 0)
                {
                    for(int k = 0; k < n; k++)
                    {
                        if(graph[j][k] == 1)
                        {
                            #pragma omp critical
                            {
                                if(frontier == NULL)
                                {
                                    frontier = createQueue();
                                }
                                enqueue(frontier, k);
                            }
                        }
                    }
                    visited[j] = 1;
                }
            }
        }
    }
    // wait for all procs to finish -- so execution time noted is correct
    MPI_Barrier(MPI_COMM_WORLD);
    if (!rank)
    {
        // Rank 0 times the three sequential reference implementations.
        stop_time = MPI_Wtime();
        printf("Total time (sec): %f\n", stop_time - start_time);
        start_time = MPI_Wtime();
        bfs_sequential(graph, 0, n);
        stop_time = MPI_Wtime();
        printf("Total time BFS_Sequential (sec): %f\n", stop_time - start_time);
        printf("BFS TOP DOWN\n");
        start_time = MPI_Wtime();
        bfs_sequential_top_down(graph, 0, n);
        stop_time = MPI_Wtime();
        printf("Total time BFS_Sequential Top-Down (sec): %f\n", stop_time - start_time);
        printf("BFS BOTTOM UP\n");
        start_time = MPI_Wtime();
        bfs_sequential_bottom_up(graph, 0, n);
        stop_time = MPI_Wtime();
        printf("Total time BFS_Sequential Bottom-Up (sec): %f\n", stop_time - start_time);
    }
    // Shutdown MPI (important - don't forget!)
    MPI_Finalize();
    return EXIT_SUCCESS;;
}
|
ast-dump-openmp-target-update.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test(int x) {
#pragma omp target update to(x)
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-update.c:3:1, line:5:1> line:3:6 test 'void (int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used x 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:5:1>
// CHECK-NEXT: `-OMPTargetUpdateDirective {{.*}} <line:4:1, col:32> openmp_standalone_directive
// CHECK-NEXT: |-OMPToClause {{.*}} <col:27, col:31>
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <col:1>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CompoundStmt {{.*}} <col:1>
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-update.c:4:1) *const restrict'
// NOTE(review): this is a clang lit/FileCheck AST-dump test; the CHECK lines
// above encode exact source line/column numbers, so do not insert or delete
// any lines before this point when editing.
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: Define the width and height of the border.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,ExceptionInfo *exception)
{
  FrameInfo
    frame;

  Image
    *bordered,
    *clone;

  /*
    A border is simply a frame with no bevels whose matte color is the
    image's border color; delegate the pixel work to FrameImage().
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  frame.x=(ssize_t) border_info->width;
  frame.y=(ssize_t) border_info->height;
  frame.width=image->columns+(border_info->width << 1);
  frame.height=image->rows+(border_info->height << 1);
  frame.inner_bevel=0;
  frame.outer_bevel=0;
  clone=CloneImage(image,0,0,MagickTrue,exception);
  if (clone == (Image *) NULL)
    return((Image *) NULL);
  clone->matte_color=image->border_color;
  bordered=FrameImage(clone,&frame,exception);
  clone=DestroyImage(clone);
  if (bordered != (Image *) NULL)
    bordered->matte_color=image->matte_color;
  return(bordered);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"
CacheView
*image_view,
*frame_view;
Image
*frame_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
accentuate,
border,
highlight,
interior,
matte,
shadow,
trough;
register ssize_t
x;
size_t
bevel_width,
height,
width;
ssize_t
y;
/*
Check frame geometry.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(frame_info != (FrameInfo *) NULL);
if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
width=frame_info->width-frame_info->x-bevel_width;
height=frame_info->height-frame_info->y-bevel_width;
if ((width < image->columns) || (height < image->rows))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
/*
Initialize framed image attributes.
*/
frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
exception);
if (frame_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse)
{
InheritException(exception,&frame_image->exception);
frame_image=DestroyImage(frame_image);
return((Image *) NULL);
}
if (frame_image->matte_color.opacity != OpaqueOpacity)
frame_image->matte=MagickTrue;
frame_image->page=image->page;
if ((image->page.width != 0) && (image->page.height != 0))
{
frame_image->page.width+=frame_image->columns-image->columns;
frame_image->page.height+=frame_image->rows-image->rows;
}
/*
Initialize 3D effects color.
*/
GetMagickPixelPacket(frame_image,&interior);
SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
&interior);
GetMagickPixelPacket(frame_image,&matte);
matte.colorspace=RGBColorspace;
SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL,
&matte);
GetMagickPixelPacket(frame_image,&border);
border.colorspace=RGBColorspace;
SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
&border);
GetMagickPixelPacket(frame_image,&accentuate);
accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
accentuate.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&highlight);
highlight.red=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
highlight.green=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
highlight.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&shadow);
shadow.red=QuantumScale*matte.red*ShadowModulate;
shadow.green=QuantumScale*matte.green*ShadowModulate;
shadow.blue=QuantumScale*matte.blue*ShadowModulate;
shadow.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&trough);
trough.red=QuantumScale*matte.red*TroughModulate;
trough.green=QuantumScale*matte.green*TroughModulate;
trough.blue=QuantumScale*matte.blue*TroughModulate;
trough.opacity=matte.opacity;
if (image->colorspace == CMYKColorspace)
{
ConvertRGBToCMYK(&interior);
ConvertRGBToCMYK(&matte);
ConvertRGBToCMYK(&border);
ConvertRGBToCMYK(&accentuate);
ConvertRGBToCMYK(&highlight);
ConvertRGBToCMYK(&shadow);
ConvertRGBToCMYK(&trough);
}
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
frame_view=AcquireCacheView(frame_image);
height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (height != 0)
{
register IndexPacket
*restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/*
Draw top of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
height,exception);
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
if (q != (PixelPacket *) NULL)
{
/*
Draw top of ornamental border.
*/
for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
{
for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
{
if (x < y)
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
width=image->columns+((size_t) frame_info->inner_bevel << 1)-
y;
for (x=0; x < (ssize_t) width; x++)
{
if (x < y)
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
/*
Draw sides of ornamental border.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/*
Initialize scanline with matte color.
*/
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
frame_image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
/*
Set frame interior to interior color.
*/
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelPacket(frame_image,&interior,q,frame_indexes);
q++;
frame_indexes++;
}
else
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
(void) CopyMagickMemory(q,p,image->columns*sizeof(*p));
if ((image->colorspace == CMYKColorspace) &&
(frame_image->colorspace == CMYKColorspace))
{
(void) CopyMagickMemory(frame_indexes,indexes,image->columns*
sizeof(*indexes));
frame_indexes+=image->columns;
}
q+=image->columns;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FrameImage)
#endif
proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
height=(size_t) (frame_info->inner_bevel+frame_info->height-
frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
if (height != 0)
{
register IndexPacket
*restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/*
Draw bottom of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
height),frame_image->columns,height,exception);
if (q != (PixelPacket *) NULL)
{
/*
Draw bottom of ornamental border.
*/
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (y=frame_info->inner_bevel-1; y >= 0; y--)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
height=frame_info->height-frame_info->y-image->rows-bevel_width;
for (y=0; y < (ssize_t) height; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=frame_info->outer_bevel-1; y >= 0; y--)
{
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
if (x >= (ssize_t) (frame_image->columns-y))
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
frame_view=DestroyCacheView(frame_view);
image_view=DestroyCacheView(image_view);
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
{
x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
frame_info->inner_bevel);
y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
(void) CompositeImage(frame_image,image->compose,image,x,y);
}
return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise)
{
#define AccentuateFactor  ScaleCharToQuantum(135)
#define HighlightFactor  ScaleCharToQuantum(190)
#define ShadowFactor  ScaleCharToQuantum(190)
#define RaiseImageTag  "Raise/Image"
#define TroughFactor  ScaleCharToQuantum(135)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    foreground,
    background;

  ssize_t
    y;

  /*
    Validate arguments: the image must be wide and tall enough to hold the
    bevel on both sides.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  /*
    Blend targets: a raised effect lightens the top/left edges toward
    QuantumRange and darkens the bottom/right toward 0; a lowered effect
    swaps the two.
  */
  foreground=(Quantum) QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=(Quantum) QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  /*
    Top band: highlight wedge on the left, accentuate across the middle,
    shadow wedge on the right.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < y; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          FIX: serialize the shared progress counter; the unguarded
          progress++ was a data race (the third loop below already guards
          this same update with a critical section).
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Middle band: highlight strip on the left, untouched interior, shadow
    strip on the right.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q++;  /* interior pixels are left unchanged */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          FIX: same race as above -- guard the shared progress counter.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Bottom band: highlight wedge on the left, trough across the middle,
    shadow wedge on the right.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
crop_and_resize.c | #include <TH/TH.h>
#include <stdio.h>
#include <math.h>
void CropAndResizePerBox(
const float * image_data,
const int batch_size,
const int depth,
const int image_height,
const int image_width,
const float * boxes_data,
const int * box_index_data,
const int start_box,
const int limit_box,
float * corps_data,
const int crop_height,
const int crop_width,
const float extrapolation_value
) {
const int image_channel_elements = image_height * image_width;
const int image_elements = depth * image_channel_elements;
const int channel_elements = crop_height * crop_width;
const int crop_elements = depth * channel_elements;
int b;
#pragma omp parallel for
for (b = start_box; b < limit_box; ++b) {
const float * box = boxes_data + b * 4;
const float y1 = box[0];
const float x1 = box[1];
const float y2 = box[2];
const float x2 = box[3];
const int b_in = box_index_data[b];
if (b_in < 0 || b_in >= batch_size) {
printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
exit(-1);
}
const float height_scale =
(crop_height > 1)
? (y2 - y1) * (image_height - 1) / (crop_height - 1)
: 0;
const float width_scale =
(crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
: 0;
for (int y = 0; y < crop_height; ++y)
{
const float in_y = (crop_height > 1)
? y1 * (image_height - 1) + y * height_scale
: 0.5 * (y1 + y2) * (image_height - 1);
if (in_y < 0 || in_y > image_height - 1)
{
for (int x = 0; x < crop_width; ++x)
{
for (int d = 0; d < depth; ++d)
{
// crops(b, y, x, d) = extrapolation_value;
corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
}
}
continue;
}
const int top_y_index = floorf(in_y);
const int bottom_y_index = ceilf(in_y);
const float y_lerp = in_y - top_y_index;
for (int x = 0; x < crop_width; ++x)
{
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1)
{
for (int d = 0; d < depth; ++d)
{
corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
}
continue;
}
const int left_x_index = floorf(in_x);
const int right_x_index = ceilf(in_x);
const float x_lerp = in_x - left_x_index;
for (int d = 0; d < depth; ++d)
{
const float *pimage = image_data + b_in * image_elements + d * image_channel_elements;
const float top_left = pimage[top_y_index * image_width + left_x_index];
const float top_right = pimage[top_y_index * image_width + right_x_index];
const float bottom_left = pimage[bottom_y_index * image_width + left_x_index];
const float bottom_right = pimage[bottom_y_index * image_width + right_x_index];
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom =
bottom_left + (bottom_right - bottom_left) * x_lerp;
corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = top + (bottom - top) * y_lerp;
}
} // end for x
} // end for y
} // end for b
}
/*
 * Forward pass: crop-and-resize every box out of `image` into `crops`.
 * `crops` is resized to [num_boxes, depth, crop_height, crop_width] and
 * zero-filled before the per-box worker is invoked over all boxes.
 */
void crop_and_resize_forward(
    THFloatTensor * image,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    const float extrapolation_value,
    const int crop_height,
    const int crop_width,
    THFloatTensor * crops
) {
    /* Input geometry (NCHW). */
    const int batch_size   = image->size[0];
    const int depth        = image->size[1];
    const int image_height = image->size[2];
    const int image_width  = image->size[3];
    const int num_boxes    = boxes->size[0];

    /* Allocate and clear the output: one NCHW crop per box. */
    THFloatTensor_resize4d(crops, num_boxes, depth, crop_height, crop_width);
    THFloatTensor_zero(crops);

    /* Sample every box ([0, num_boxes)) via the shared worker. */
    CropAndResizePerBox(
        THFloatTensor_data(image),
        batch_size, depth, image_height, image_width,
        THFloatTensor_data(boxes),
        THIntTensor_data(box_index),
        0, num_boxes,
        THFloatTensor_data(crops),
        crop_height, crop_width,
        extrapolation_value);
}
/*
 * Backward pass: scatter the crop gradients in `grads` back onto the image
 * gradient tensor `grads_image` (zeroed here first).  Each crop pixel's
 * gradient is distributed to the four bilinear taps it was sampled from;
 * samples that fell outside the image contribute nothing.  Aborts the
 * process on an out-of-range batch index.
 */
void crop_and_resize_backward(
    THFloatTensor * grads,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    THFloatTensor * grads_image // resize to [bsize, c, hc, wc]
)
{
    /* Tensor geometry (both tensors are NCHW). */
    const int batch_size   = grads_image->size[0];
    const int depth        = grads_image->size[1];
    const int image_height = grads_image->size[2];
    const int image_width  = grads_image->size[3];

    const int num_boxes   = grads->size[0];
    const int crop_height = grads->size[2];
    const int crop_width  = grads->size[3];

    /* Element counts: one channel / one full item, on each side. */
    const int in_plane  = image_height * image_width;
    const int in_image  = depth * in_plane;
    const int out_plane = crop_height * crop_width;
    const int out_crop  = depth * out_plane;

    /* Gradients accumulate with +=, so start from zero. */
    THFloatTensor_zero(grads_image);

    const float *grads_data       = THFloatTensor_data(grads);
    const float *boxes_data       = THFloatTensor_data(boxes);
    const int   *box_index_data   = THIntTensor_data(box_index);
    float       *grads_image_data = THFloatTensor_data(grads_image);

    for (int b = 0; b < num_boxes; ++b) {
        const float *box = boxes_data + 4 * b;
        const float y1 = box[0], x1 = box[1], y2 = box[2], x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }

        /* Same coordinate mapping as the forward pass. */
        const float height_scale = (crop_height > 1)
            ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
            : 0;
        const float width_scale = (crop_width > 1)
            ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
            : 0;

        for (int y = 0; y < crop_height; ++y) {
            const float in_y = (crop_height > 1)
                ? y1 * (image_height - 1) + y * height_scale
                : 0.5 * (y1 + y2) * (image_height - 1);
            if (in_y < 0 || in_y > image_height - 1)
                continue;   /* sample fell outside the image: no gradient */

            const int top_y = floorf(in_y);
            const int bot_y = ceilf(in_y);
            const float y_lerp = in_y - top_y;

            for (int x = 0; x < crop_width; ++x) {
                const float in_x = (crop_width > 1)
                    ? x1 * (image_width - 1) + x * width_scale
                    : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                    continue;

                const int left_x  = floorf(in_x);
                const int right_x = ceilf(in_x);
                const float x_lerp = in_x - left_x;

                for (int d = 0; d < depth; ++d) {
                    float *plane = grads_image_data + b_in * in_image + d * in_plane;
                    const float g =
                        grads_data[out_crop * b + out_plane * d + y * crop_width + x];

                    /* Scatter to the four bilinear taps. */
                    const float g_top = (1 - y_lerp) * g;
                    plane[top_y * image_width + left_x]  += (1 - x_lerp) * g_top;
                    plane[top_y * image_width + right_x] += x_lerp * g_top;

                    const float g_bot = y_lerp * g;
                    plane[bot_y * image_width + left_x]  += (1 - x_lerp) * g_bot;
                    plane[bot_y * image_width + right_x] += x_lerp * g_bot;
                } // end d
            } // end x
        } // end y
    } // end b
}
|
par_gsmg.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Geometrically smooth interpolation multigrid
*
*****************************************************************************/
#include <stdio.h>
#include <math.h>
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "_hypre_lapack.h"
#ifndef ABS
#define ABS(x) ((x)>0 ? (x) : -(x))
#endif
#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif
/* Euclidean (2-)norm of the length-n vector x. */
static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x)
{
   HYPRE_Real sumsq = 0.;
   HYPRE_Int  i;

   for (i = 0; i < n; i++)
   {
      sumsq += x[i] * x[i];
   }
   return sqrt(sumsq);
}
/* Scale the length-n vector x in place by the scalar a. */
static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x)
{
   HYPRE_Int i;

   for (i = 0; i < n; i++)
   {
      x[i] *= a;
   }
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFillSmooth
* - fill in smooth matrix
* - this function will scale the smooth vectors
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples,
                             hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A,
                             HYPRE_Int num_functions, HYPRE_Int *dof_func)
{
   /* Fill the entries of the strength matrix S from the `nsamples` smooth
      sample vectors in `samples` (stored contiguously, one length-n vector
      after another).  Each off-diagonal entry S(i,j) becomes the reciprocal
      of the accumulated |p[i] - p[j]| over all normalized sample vectors p;
      entries between unlike functions or matching explicit zeros of A are
      set to 0.  This routine SCALES the sample vectors in place.
      NOTE(review): indexing A_diag_data/A_offd_data with S's column index j
      assumes S and A share an identical sparsity pattern -- confirm at the
      call site. */
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j, k, ii, index, start;
   HYPRE_Int num_cols_offd;
   HYPRE_Int num_sends;
   HYPRE_Int *dof_func_offd;
   HYPRE_Int *int_buf_data;
   HYPRE_Real temp;
   HYPRE_Real *p;          /* walks the sample vectors, one per iteration */
   HYPRE_Real *p_offd;     /* off-processor sample values, per column of S_offd */
   HYPRE_Real *p_ptr;      /* saved head of p_offd for later indexing/free */
   HYPRE_Real *buf_data;
   HYPRE_Real nm;
#if 0
   HYPRE_Real mx = 0., my = 1.e+10;
#endif

   /* normalize each sample vector and divide by number of samples */
   for (k=0; k<nsamples; k++)
   {
      nm = mydnrm2(n, samples+k*n);
      nm = 1./nm/nsamples;
      mydscal(n, nm, samples+k*n);
   }

   /* Exchange the (scaled) sample values needed for S's off-diagonal
      columns: for each sample vector, pack the locally-owned entries the
      neighbors need and receive their values into p_offd. */
   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                            num_sends), HYPRE_MEMORY_HOST);
   p_offd = hypre_CTAlloc(HYPRE_Real, nsamples*num_cols_offd, HYPRE_MEMORY_HOST);
   p_ptr = p_offd;
   p = samples;
   for (k = 0; k < nsamples; k++)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            buf_data[index++]
               = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data,
                                                  p_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      p = p+n;
      p_offd = p_offd+num_cols_offd;
   }
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);

   /* For systems (num_functions > 1), also exchange the function labels of
      the off-processor columns so we can skip unlike-function couplings. */
   if (num_functions > 1)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                   num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /* Fill S row by row. */
   for (i = 0; i < n; i++)
   {
      /* Diagonal part; the first entry of each row is skipped (j starts at
         S_diag_i[i]+1 -- presumably the diagonal element, stored first). */
      for (j = S_diag_i[i]+1; j < S_diag_i[i+1]; j++)
      {
         ii = S_diag_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func[ii])
         {
            S_diag_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_diag_data[j] == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         /* Accumulate |p[i] - p[ii]| over all sample vectors. */
         temp = 0.;
         p = samples;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p[ii]);
            p = p + n;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_diag_data[j] = temp;
      }

      /* Off-diagonal part: same computation, but the neighbor value comes
         from the exchanged p_offd block for each sample vector. */
      for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
      {
         ii = S_offd_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func_offd[ii])
         {
            S_offd_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_offd_data[j] == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 0.;
         p = samples;
         p_offd = p_ptr;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p_offd[ii]);
            p = p + n;
            p_offd = p_offd + num_cols_offd;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_offd_data[j] = temp;
      }
   }
#if 0
   hypre_printf("MIN, MAX: %f %f\n", my, mx);
#endif

   /* p_ptr still points at the start of the exchanged block. */
   hypre_TFree(p_ptr, HYPRE_MEMORY_HOST);
   if (num_functions > 1)
      hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);

   return 0;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixChooseThresh
*--------------------------------------------------------------------------*/
HYPRE_Real
hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S)
{
   MPI_Comm         comm        = hypre_ParCSRMatrixComm(S);
   hypre_CSRMatrix *S_diag      = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix *S_offd      = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_diag_i    = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_offd_i    = hypre_CSRMatrixI(S_offd);
   HYPRE_Real      *S_diag_data = hypre_CSRMatrixData(S_diag);
   HYPRE_Real      *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int        num_rows    = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int        row, k;
   HYPRE_Real       row_max;
   HYPRE_Real       local_minimax = 1.e+10;
   HYPRE_Real       global_minimax;

   /* For every local row, take the largest entry over both the diag and
    * offd parts; keep the smallest such row maximum over all nonempty rows. */
   for (row = 0; row < num_rows; row++)
   {
      row_max = 0.;
      for (k = S_diag_i[row]; k < S_diag_i[row+1]; k++)
      {
         row_max = hypre_max(row_max, S_diag_data[k]);
      }
      for (k = S_offd_i[row]; k < S_offd_i[row+1]; k++)
      {
         row_max = hypre_max(row_max, S_offd_data[k]);
      }
      if (row_max != 0.)
      {
         local_minimax = hypre_min(local_minimax, row_max);
      }
   }

   /* Reduce to the global minimum across all ranks. */
   hypre_MPI_Allreduce(&local_minimax, &global_minimax, 1, HYPRE_MPI_REAL,
                       hypre_MPI_MIN, comm);

   return global_minimax;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixThreshold
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh)
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_nonzeros_diag = A_diag_i[n];
HYPRE_Int num_nonzeros_offd = A_offd_i[n];
HYPRE_Int *S_diag_i;
HYPRE_Int *S_diag_j;
HYPRE_Real *S_diag_data;
HYPRE_Int *S_offd_i;
HYPRE_Int *S_offd_j;
HYPRE_Real *S_offd_data;
HYPRE_Int count, i, jS, jA;
/* first count the number of nonzeros we will need */
count = 0;
for (i=0; i<num_nonzeros_diag; i++)
if (A_diag_data[i] >= thresh)
count++;
/* allocate vectors */
S_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);
jS = 0;
for (i = 0; i < n; i++)
{
S_diag_i[i] = jS;
for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= thresh)
{
S_diag_data[jS] = A_diag_data[jA];
S_diag_j[jS] = A_diag_j[jA];
jS++;
}
}
}
S_diag_i[n] = jS;
hypre_CSRMatrixNumNonzeros(A_diag) = jS;
/* free the vectors we don't need */
hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST);
/* assign the new vectors */
hypre_CSRMatrixI(A_diag) = S_diag_i;
hypre_CSRMatrixJ(A_diag) = S_diag_j;
hypre_CSRMatrixData(A_diag) = S_diag_data;
/*
* Offd part
*/
/* first count the number of nonzeros we will need */
count = 0;
for (i=0; i<num_nonzeros_offd; i++)
if (A_offd_data[i] >= thresh)
count++;
/* allocate vectors */
S_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
S_offd_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);
jS = 0;
for (i = 0; i < n; i++)
{
S_offd_i[i] = jS;
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= thresh)
{
S_offd_data[jS] = A_offd_data[jA];
S_offd_j[jS] = A_offd_j[jA];
jS++;
}
}
}
S_offd_i[n] = jS;
hypre_CSRMatrixNumNonzeros(A_offd) = jS;
/* free the vectors we don't need */
hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST);
/* assign the new vectors */
hypre_CSRMatrixI(A_offd) = S_offd_i;
hypre_CSRMatrixJ(A_offd) = S_offd_j;
hypre_CSRMatrixData(A_offd) = S_offd_data;
return 0;
}
/*--------------------------------------------------------------------------
* CreateSmoothVecs
* - smoother depends on the level being used
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateSmoothVecs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Int num_sweeps,
                                HYPRE_Int level,
                                HYPRE_Real **SmoothVecs_p)
{
   hypre_ParAMGData    *amg_data = (hypre_ParAMGData*) data;
   MPI_Comm             comm     = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix     *A_diag   = hypre_ParCSRMatrixDiag(A);
   hypre_ParVector     *Zero;
   hypre_ParVector     *Temp;
   hypre_ParVector     *U;
   hypre_ParVector     *Qtemp = NULL;
   HYPRE_Int            k, sweep, samp;
   HYPRE_BigInt         n        = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int            n_local  = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt        *starts   = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int            nsamples = hypre_ParAMGDataNumSamples(amg_data);
   HYPRE_Int            ret;
   HYPRE_Real          *vdata, *smooth_vecs, *dest;
   HYPRE_Int            rlx_type;
   HYPRE_Int            smooth_type;
   HYPRE_Int            smooth_option = 0;
   HYPRE_Int            smooth_num_levels;
   HYPRE_Solver        *smoother;
   HYPRE_Int            debug_flag  = hypre_ParAMGDataDebugFlag(amg_data);
   HYPRE_Int            num_threads = hypre_NumThreads();

   /* make sure a communication package exists for A */
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (debug_flag >= 1)
   {
      hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps,
                   nsamples);
   }

   /* pick the smoother: a complex smoother configured for this level
    * overrides the default relaxation and its sweep count */
   smooth_type = hypre_ParAMGDataSmoothType(amg_data);
   smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data);
   if (smooth_num_levels > level)
   {
      smooth_option = smooth_type;
      smoother = hypre_ParAMGDataSmoother(amg_data);
      num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data);
   }
   rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0];

   /* zero right-hand side */
   Zero = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Zero, 0);
   hypre_ParVectorInitialize(Zero);
   vdata = hypre_VectorData(hypre_ParVectorLocalVector(Zero));
   for (k = 0; k < n_local; k++)
   {
      vdata[k] = 0.;
   }

   /* scratch vector for the relaxation */
   Temp = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Temp, 0);
   hypre_ParVectorInitialize(Temp);
   vdata = hypre_VectorData(hypre_ParVectorLocalVector(Temp));
   for (k = 0; k < n_local; k++)
   {
      vdata[k] = 0.;
   }

   /* iterate/solution vector; its local data is (re)filled per sample */
   U = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(U, 0);
   hypre_ParVectorInitialize(U);
   vdata = hypre_VectorData(hypre_ParVectorLocalVector(U));

   /* extra scratch vector needed by threaded relaxation */
   if (num_threads > 1)
   {
      Qtemp = hypre_ParVectorCreate(comm, n, starts);
      hypre_ParVectorInitialize(Qtemp);
      hypre_ParVectorSetPartitioningOwner(Qtemp, 0);
   }

   /* storage for all sample vectors, laid out back to back */
   smooth_vecs = hypre_CTAlloc(HYPRE_Real, nsamples*n_local, HYPRE_MEMORY_HOST);
   dest = smooth_vecs;

   /* smooth random vectors against the zero right-hand side */
   for (samp = 0; samp < nsamples; samp++)
   {
      for (k = 0; k < n_local; k++)
      {
         vdata[k] = hypre_Rand() - .5;
      }
      for (sweep = 0; sweep < num_sweeps; sweep++)
      {
         if (smooth_option == 6)
         {
            HYPRE_SchwarzSolve(smoother[level],
                               (HYPRE_ParCSRMatrix) A,
                               (HYPRE_ParVector) Zero,
                               (HYPRE_ParVector) U);
         }
         else
         {
            ret = hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/,
                                       rlx_type, 0 /*rel pts*/, 1.0 /*weight*/,
                                       1.0 /*omega*/, NULL, U, Temp,
                                       Qtemp);
            hypre_assert(ret == 0);
         }
      }
      /* copy out the smoothed sample */
      for (k = 0; k < n_local; k++)
      {
         *dest++ = vdata[k];
      }
   }

   hypre_ParVectorDestroy(Zero);
   hypre_ParVectorDestroy(Temp);
   hypre_ParVectorDestroy(U);
   if (num_threads > 1)
   {
      hypre_ParVectorDestroy(Qtemp);
   }

   /* caller owns and eventually frees the returned array */
   *SmoothVecs_p = smooth_vecs;
   return 0;
}
/*--------------------------------------------------------------------------
* CreateSmoothDirs replaces CreateS in AMG
* - smoother depends on the level being used
* - in this version, CreateSmoothVecs must be called prior to this function
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateSmoothDirs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Real *SmoothVecs,
                                HYPRE_Real thresh,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                hypre_ParCSRMatrix **S_ptr)
{
   hypre_ParAMGData   *amg_data = (hypre_ParAMGData*) data;
   hypre_ParCSRMatrix *S;
   HYPRE_Real          cutoff;
   HYPRE_Int           verbose = hypre_ParAMGDataDebugFlag(amg_data);

   /* Start from a copy of A's structure (values not copied). */
   S = hypre_ParCSRMatrixClone(A, 0);

   /* Fill S with smooth-vector difference measures. */
   hypre_ParCSRMatrixFillSmooth(hypre_ParAMGDataNumSamples(amg_data),
                                SmoothVecs, S, A, num_functions, dof_func);

   /* Smallest per-row maximum over all rows and ranks. */
   cutoff = hypre_ParCSRMatrixChooseThresh(S);
   if (verbose >= 1)
   {
      hypre_printf("Minimax chosen: %f\n", cutoff);
   }

   /* Drop entries below thresh * minimax and compress the storage. */
   hypre_ParCSRMatrixThreshold(S, thresh*cutoff);

   *S_ptr = S;
   return 0;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGNormalizeVecs
*
* Normalize the smooth vectors and also make the first vector the constant
* vector
*
* inputs:
* n = length of smooth vectors
* num = number of smooth vectors
* V = smooth vectors (array of length n*num), also an output
*
* output:
* V = adjusted smooth vectors
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real *V)
{
   HYPRE_Int  vec, k;
   HYPRE_Real norm;

   /* overwrite the first vector with the constant vector */
   for (k = 0; k < n; k++)
   {
      V[k] = 1.0;
   }

   /* scale each vector to unit 2-norm */
   for (vec = 0; vec < num; vec++)
   {
      HYPRE_Real *v = &V[vec*n];
      norm = mydnrm2(n, v);
      mydscal(n, 1./norm, v);
   }

   return 0;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGFitVectors
*
* Construct interpolation weights based on fitting smooth vectors
*
* inputs:
* ip = row number of row in P being processed (0-based)
* n = length of smooth vectors
* num = number of smooth vectors
* V = smooth vectors (array of length n*num), also an output
* nc = number of coarse grid points
* ind = indices of coarse grid points (0-based)
*
* output:
* val = interpolation weights for the coarse grid points
* V = smooth vectors; first one has been changed to constant vector;
* vectors have also been normalized; this is also an input
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num, const HYPRE_Real *V,
HYPRE_Int nc, const HYPRE_Int *ind, HYPRE_Real *val)
{
/* Solve the least-squares problem min || a*x - b || where column j of a
 * holds the values of the smooth vectors at coarse point ind[j] and b
 * holds their values at fine row ip; the solution x becomes the
 * interpolation weights val[0..nc-1]. */
HYPRE_Real *a, *b;
HYPRE_Real *ap;
HYPRE_Int i, j;
HYPRE_Real *work;
HYPRE_Int work_size;
HYPRE_Int info;
HYPRE_Int temp;
/*
hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc);
for (i=0; i<nc; i++)
hypre_printf("%d ", ind[i]);
hypre_printf("\n");
*/
/* nothing to fit if the row has no coarse neighbors */
if (nc == 0)
return 0;
/* fixed-size LAPACK workspace; NOTE(review): kludge — assumes this is
 * always large enough for the dgels workspace query-free call; confirm
 * against the largest num/nc this code can see */
work_size = 2000*64;
work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST);
/* pack a in column-major order (num rows x nc columns), as LAPACK expects:
 * a(:,j) = values of the num smooth vectors at coarse point ind[j] */
a = hypre_CTAlloc(HYPRE_Real, num*nc, HYPRE_MEMORY_HOST);
ap = a;
for (j=0; j<nc; j++)
{
for (i=0; i<num; i++)
{
*ap = V[i*n+ind[j]];
ap++;
}
}
/* b must have room for max(num, nc): dgels overwrites it with the
 * solution, whose leading dimension is max(m, n) */
temp = MAX(nc, num);
b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST);
for (i=0; i<num; i++)
b[i] = V[i*n+ip];
{
char trans = 'N';
HYPRE_Int one = 1;
/* NOTE(review): presumably a wrapper of LAPACK dgels — m=num, n=nc,
 * nrhs=1, lda=num, ldb=temp; verify against the hypre lapack shim */
hypre_dgels(&trans, &num, &nc, &one, a, &num,
b, &temp, work, &work_size, &info);
if (info != 0)
/* NOTE(review): hypre_error_w_msg takes a plain string here, so the
 * %d placeholder is emitted literally, not substituted with info */
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"par_gsmg: dgels returned %d\n");
/* copy solution into output vector */
for (j=0; j<nc; j++)
val[j] = b[j];
}
hypre_TFree(b, HYPRE_MEMORY_HOST);
hypre_TFree(a, HYPRE_MEMORY_HOST);
hypre_TFree(work, HYPRE_MEMORY_HOST);
/* returns the dgels status: 0 on success */
return info;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpLS
*
* Interpolation built from fitting smooth vectors
* - sequential version only
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix *A,
                              HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S,
                              HYPRE_BigInt *num_cpts_global,
                              HYPRE_Int num_functions,
                              HYPRE_Int *dof_func,
                              HYPRE_Int debug_flag,
                              HYPRE_Real trunc_factor,
                              HYPRE_Int num_smooth,
                              HYPRE_Real *SmoothVecs,
                              hypre_ParCSRMatrix **P_ptr)
{
   /* Build interpolation P by least-squares fitting of the smooth vectors:
    * C-points interpolate by identity; each F-point's weights come from
    * hypre_BoomerAMGFitVectors over its strong C-neighbors in S_diag.
    * The off-diagonal (off-processor) part is not implemented here
    * ("undone" below), so P_offd is always empty. */
   MPI_Comm                comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int        num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   /* BUG FIX: tmp_map_offd was uninitialized; since the offd part of P is
    * never filled here, P_offd_size is always 0 and the pointer was freed
    * (and passed to hypre_GetCommPkgRTFromCommPkgA) without ever being
    * assigned.  Initialize to NULL (free(NULL) is a no-op), matching the
    * sibling GSMG routine. */
   HYPRE_Int          *tmp_map_offd = NULL;
   HYPRE_Int          *CF_marker_offd;
   HYPRE_Int          *dof_func_offd = NULL;
   hypre_CSRMatrix    *S_ext;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;
   HYPRE_Int        P_diag_size;
   HYPRE_Int        P_offd_size;
   HYPRE_Int       *P_marker;

   HYPRE_Int        jj_counter, jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;
   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int       *fine_to_coarse;
   HYPRE_Int       *coarse_counter;
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   HYPRE_Int        num_cols_P_offd;
   HYPRE_Int        i, i1;
   HYPRE_Int        j, jl, jj;
   HYPRE_Int        start;
   HYPRE_Real       one = 1.0;
   HYPRE_Int        my_id;
   HYPRE_Int        num_procs;
   HYPRE_Int        num_threads;
   HYPRE_Int        num_sends;
   HYPRE_Int        index;
   HYPRE_Int        ns, ne, size, rest;
   HYPRE_Int       *int_buf_data;
   HYPRE_Real       wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   num_threads = hypre_NumThreads();

   total_global_cpts = num_cpts_global[num_procs];

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_S_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   }

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(S);
      comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   /* exchange CF_marker for ghost columns */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* exchange dof_func for ghost columns in the multi-function case */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of S
    *---------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /* per-thread counters, later turned into prefix sums */
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid: each thread counts its rows' contributions.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* split rows [0,n_fine) into num_threads nearly equal chunks */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               /* removed -- off-processor interpolation not implemented,
                * so jj_count_offd stays 0 and P_offd is empty */
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays (after prefix-summing the per-thread counts).
    *-----------------------------------------------------------------------*/
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Shift each thread's local coarse numbering to a global (on-process)
    * numbering.  (The off-process fine_to_coarse exchange that the full
    * interpolation routines perform is not needed here since P_offd is
    * never filled.)
    *-----------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   /*-----------------------------------------------------------------------
    * Second Pass: fill P row by row.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* each thread resumes at the prefix-summed counter of its predecessor */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation by least-squares fit.
          *--------------------------------------------------------------------*/
         else
         {
            HYPRE_Int kk;
            HYPRE_Int indices[1000]; /* kludge -- NOTE(review): silently
                                        overflows if a row has more than
                                        1000 strong C-neighbors */

            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            kk = 0;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  jj_counter++;
                  indices[kk] = i1;
                  kk++;
               }
            }
            /* weights for row i from fitting the smooth vectors */
            hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,
                                      kk, indices, &P_diag_data[P_diag_i[i]]);

            /* Off-Diagonal part of P */
            /* undone */
         }
      }
   }
   /* BUG FIX: removed trailing "P_diag_i[i] = jj_counter;" -- under OpenMP
    * both i and jj_counter are private to the loop above (indeterminate
    * afterwards), and serially it merely rewrote P_diag_i[n_fine] with the
    * value already stored at allocation time. */

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(S),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(S),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Compress the offd column space of P to only the columns actually
    * referenced (always empty in this routine, kept for symmetry with the
    * full interpolation builders). */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
      {
         P_marker[i] = P_offd_j[i];
      }
      hypre_qsort0(P_marker, 0, P_offd_size-1);

      /* unique the sorted column list in place */
      num_cols_P_offd = 1;
      index = P_marker[0];
      for (i=1; i < P_offd_size; i++)
      {
         if (P_marker[i] > index)
         {
            index = P_marker[i];
            P_marker[num_cols_P_offd++] = index;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_P_offd; i++)
      {
         tmp_map_offd[i] = P_marker[i];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
      {
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);

   return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpGSMG
*
* Difference with hypre_BoomerAMGBuildInterp is that S contains values
* and is used to build interpolation weights. Matrix A is not used.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int *tmp_map_offd = NULL;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *CF_marker_offd;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *S_ext;
HYPRE_Real *S_ext_data;
HYPRE_Int *S_ext_i;
HYPRE_BigInt *S_ext_j;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *coarse_counter;
//HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_BigInt big_i2;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int start;
HYPRE_Int c_num;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
total_global_cpts = 0; /* we will set this later for the matrix in the setup */
/* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_S_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(S);
comm_pkg = hypre_ParCSRMatrixCommPkg(S);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of S
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixBigJ(S_ext);
S_ext_data = hypre_CSRMatrixData(S_ext);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_S_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
jj_end_row_offd = jj_counter_offd;
/* Loop over ith row of S. First, the diagonal part of S */
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += S_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of S for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
sum += S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
sum += S_offd_data[jj1];
}
}
if (sum != 0)
{
distribute = S_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of S for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
P_diag_data[P_marker[i2]]
+= distribute * S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i2]]
+= distribute * S_offd_data[jj1];
}
}
}
else
{
/* do nothing */
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else
{
/* do nothing */
}
}
/*----------------------------------------------------------------
* Still looping over ith row of S. Next, loop over the
* off-diagonal part of S
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += S_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of S_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = S_offd_j[jj];
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
big_i2 = S_ext_j[jj1];
if (big_i2 >= col_1 && big_i2 < col_n)
{
/* in the diagonal block */
if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row)
sum += S_ext_data[jj1];
}
else
{
/* in the off_diagonal block */
j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
sum += S_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = S_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of S_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
big_i2 = S_ext_j[jj1];
if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */
{
if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row)
P_diag_data[P_marker[(HYPRE_Int)(big_i2-col_1)]]
+= distribute * S_ext_data[jj1];
}
else
{
/* check to see if it is in the off_diagonal block */
j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[j]]
+= distribute * S_ext_data[jj1];
}
}
}
}
else
{
/* do nothing */
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else
{
/* do nothing */
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
sum = 0.;
for (jj = jj_begin_row; jj < jj_end_row; jj++)
sum += P_diag_data[jj];
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
sum += P_offd_data[jj];
for (jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= sum;
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= sum;
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(S),
total_global_cpts,
hypre_ParCSRMatrixColStarts(S),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_marker[i] = P_offd_j[i];
hypre_qsort0(P_marker, 0, P_offd_size-1);
num_cols_P_offd = 1;
index = P_marker[0];
for (i=1; i < P_offd_size; i++)
{
if (P_marker[i] > index)
{
index = P_marker[i];
P_marker[num_cols_P_offd++] = index;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_P_offd; i++)
tmp_map_offd[i] = P_marker[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
return(0);
}
|
GB_unaryop__identity_int8_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_int64
// op(A') function: GB_tran__identity_int8_int64
// C type: int8_t
// A type: int64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int8_int64
(
    int8_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was disabled at compile time; the caller
    // must fall back to the generic case
    return (GrB_NO_VALUE) ;
    #else
    // Apply the identity operator entrywise: Cx [p] = (int8_t) Ax [p].
    // This is the hand-expanded form of GB_CAST_OP (p, p); the execution
    // trace is identical: load the entry, typecast it, store it.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;          // aij = Ax [p]
        int8_t  cij = (int8_t) aij ;    // typecast int64_t -> int8_t
        Cx [p] = cij ;                  // identity op: cij = aij
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting each int64_t entry to
// int8_t and applying the identity unary operator.  All of the work is done
// by the shared template GB_unaryop_transpose.c, specialized through the
// GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined earlier in this file.
GrB_Info GB_tran__identity_int8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,    // NOTE(review): presumably the per-slice row
                            // counts from a prior pass -- confirm against
                            // GB_unaryop_transpose.c
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // this operator/type pairing was disabled at compile time (see
    // GB_DISABLE above); the caller must use the generic case instead
    return (GrB_NO_VALUE) ;
    #else
    // select phase 2 of the template's two-phase transpose algorithm
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* Record parser information about an expression that is irrelevant
   for code generation alongside a tree representing its value.  All
   fields are trees or tree codes; the struct owns no storage of its
   own.  */
struct c_expr
{
  /* The value of the expression.  */
  tree value;
  /* Record the original unary/binary operator of an expression, which may
     have been changed by fold, STRING_CST for unparenthesized string
     constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
     (even if parenthesized), for subexpressions, and for non-constant
     initializers, or ERROR_MARK for other expressions (including
     parenthesized expressions).  */
  enum tree_code original_code;
  /* If not NULL, the original type of an expression.  This will
     differ from the type of the value field for an enum constant.
     The type of an enum constant is a plain integer type, but this
     field will be the enum type.  */
  tree original_type;
};
/* A kind of type specifier.  Note that this information is currently
   only used to distinguish tag definitions, tag references and typeof
   uses.  */
enum c_typespec_kind {
  /* No typespec.  This appears only in struct c_declspecs.  */
  ctsk_none,
  /* A reserved keyword type specifier.  */
  ctsk_resword,
  /* A reference to a tag, previously declared, such as "struct foo".
     This includes where the previous declaration was as a different
     kind of tag, in which case this is only valid if shadowing that
     tag in an inner scope.  */
  ctsk_tagref,
  /* A reference to a tag, not previously declared in a visible
     scope.  */
  ctsk_tagfirstref,
  /* A definition of a tag such as "struct foo { int a; }".  */
  ctsk_tagdef,
  /* A typedef name.  */
  ctsk_typedef,
  /* An ObjC-specific kind of type specifier.  */
  ctsk_objc,
  /* A typeof specifier.  */
  ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
   passed to declspecs_add_type only.  */
struct c_typespec {
  /* What kind of type specifier this is.  */
  enum c_typespec_kind kind;
  /* Whether the expression has operands suitable for use in constant
     expressions.  (Cf. the field of the same name in struct
     c_declspecs below.)  */
  bool expr_const_operands;
  /* The specifier itself.  */
  tree spec;
  /* An expression to be evaluated before the type specifier, in the
     case of typeof specifiers, or NULL otherwise or if no such
     expression is required for a particular typeof specifier.  In
     particular, when typeof is applied to an expression of variably
     modified type, that expression must be evaluated in order to
     determine array sizes that form part of the type, but the
     expression itself (as opposed to the array sizes) forms no part
     of the type and so needs to be recorded separately.  */
  tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int128,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_fract,
cts_accum
};
/* A sequence of declaration specifiers in C.  */
struct c_declspecs {
  /* The type specified, if a single type specifier such as a struct,
     union or enum specifier, typedef name or typeof specifies the
     whole type, or NULL_TREE if none or a keyword such as "void" or
     "char" is used.  Does not include qualifiers.  */
  tree type;
  /* Any expression to be evaluated before the type, from a typeof
     specifier.  */
  tree expr;
  /* The attributes from a typedef decl.  */
  tree decl_attr;
  /* When parsing, the attributes.  Outside the parser, this will be
     NULL; attributes (possibly from multiple lists) will be passed
     separately.  */
  tree attrs;
  /* The storage class specifier, or csc_none if none.  */
  enum c_storage_class storage_class;
  /* Any type specifier keyword used such as "int", not reflecting
     modifiers such as "short", or cts_none if none.  (8 bits is ample
     for all the c_typespec_keyword values above.)  */
  ENUM_BITFIELD (c_typespec_keyword, typespec_word, 8);
  /* The kind of type specifier if one has been seen, ctsk_none
     otherwise.  (3 bits holds all 8 c_typespec_kind values.)  */
  ENUM_BITFIELD (c_typespec_kind, typespec_kind, 3);
  /* Whether any expressions in typeof specifiers may appear in
     constant expressions.  */
  BOOL_BITFIELD expr_const_operands : 1;
  /* Whether any declaration specifiers have been seen at all.  */
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether the type is explicitly "signed" or specified by a typedef
     whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.  */
  BOOL_BITFIELD default_int_p : 1;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "__thread" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
  /* Whether "_Sat" was specified.  */
  BOOL_BITFIELD saturating_p : 1;
  /* The address space that the declaration belongs to.  */
  addr_space_t address_space;
};
/* The various kinds of declarators in C.  The kind selects which
   member of the union "u" in struct c_declarator below is active.  */
enum c_declarator_kind {
  /* An identifier.  */
  cdk_id,
  /* A function.  */
  cdk_function,
  /* An array.  */
  cdk_array,
  /* A pointer.  */
  cdk_pointer,
  /* Parenthesized declarator with nested attributes.  */
  cdk_attrs
};
/* A struct, union or enum tag defined among a function's parameters
   (see the "tags" field of struct c_arg_info below).  */
typedef struct c_arg_tag_d {
  /* The tag's name.  */
  tree id;
  /* The type the tag refers to.  */
  tree type;
} c_arg_tag;
/* Declare the GC-allocated vector type VEC(c_arg_tag,gc) used by
   struct c_arg_info.  */
DEF_VEC_O(c_arg_tag);
DEF_VEC_ALLOC_O(c_arg_tag,gc);
/* Information about the parameters in a function declarator.  */
struct c_arg_info {
  /* A list of parameter decls.  */
  tree parms;
  /* A list of structure, union and enum tags defined.  (Uses the
     VEC(c_arg_tag,gc) type declared above.)  */
  VEC(c_arg_tag,gc) *tags;
  /* A list of argument types to go in the FUNCTION_TYPE.  */
  tree types;
  /* A list of non-parameter decls (notably enumeration constants)
     defined with the parameters.  */
  tree others;
  /* A VEC of VLA sizes from the parameters.  In a function
     definition, these are used to ensure that side-effects in sizes
     of arrays converted to pointers (such as a parameter int i[n++])
     take place; otherwise, they are ignored.  */
  VEC(tree,gc) *pending_sizes;
  /* True when these arguments had [*].  */
  BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator.  Declarators form a chain through the "declarator"
   field, terminated by a cdk_id node.  */
struct c_declarator {
  /* The kind of declarator.  */
  enum c_declarator_kind kind;
  location_t id_loc; /* Currently only set for cdk_id, cdk_array.  */
  /* Except for cdk_id, the contained declarator.  For cdk_id, NULL.  */
  struct c_declarator *declarator;
  /* Per-kind data; which member is active is determined by KIND.  */
  union {
    /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
       declarator.  */
    tree id;
    /* For functions.  */
    struct c_arg_info *arg_info;
    /* For arrays.  */
    struct {
      /* The array dimension, or NULL for [] and [*].  */
      tree dimen;
      /* The qualifiers inside [].  */
      int quals;
      /* The attributes (currently ignored) inside [].  */
      tree attrs;
      /* Whether [static] was used.  */
      BOOL_BITFIELD static_p : 1;
      /* Whether [*] was used.  */
      BOOL_BITFIELD vla_unspec_p : 1;
    } array;
    /* For pointers, the qualifiers on the pointer type.  */
    int pointer_quals;
    /* For attributes.  */
    tree attrs;
  } u;
};
/* A type name: declaration specifiers paired with a (possibly
   abstract) declarator.  */
struct c_type_name {
  /* The declaration specifiers.  */
  struct c_declspecs *specs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* A parameter.  Unlike struct c_type_name, any prefix attributes are
   kept out of the specifiers and stored separately in "attrs".  */
struct c_parm {
  /* The declaration specifiers, minus any prefix attributes.  */
  struct c_declspecs *specs;
  /* The attributes.  */
  tree attrs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* Used when parsing an enum.  Initialized by start_enum and consumed
   by build_enumerator (see the prototypes below).  */
struct c_enum_contents
{
  /* While defining an enum type, this is 1 plus the last enumerator
     constant value.  */
  tree enum_next_value;
  /* Nonzero means that there was overflow computing enum_next_value.  */
  int enum_overflow;
};
/* A type of reference to a static identifier in an inline
   function.  Passed to record_inline_static (declared below).  */
enum c_inline_static_type {
  /* Identifier with internal linkage used in function that may be an
     inline definition (i.e., file-scope static).  */
  csi_internal,
  /* Modifiable object with static storage duration defined in
     function that may be an inline definition (i.e., local
     static).  */
  csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern int global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern int c_expand_decl (tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, int, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
tree, tree);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (location_t, int opt, const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void push_init_level (int, struct obstack *);
extern struct c_expr pop_init_level (int, struct obstack *);
extern void set_init_index (tree, tree, struct obstack *);
extern void set_init_label (tree, struct obstack *);
extern void process_init_element (struct c_expr, bool, struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern tree c_finish_omp_clauses (tree);
extern tree c_build_va_arg (location_t, tree, tree);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* Nonzero means we are reading code that came from a system header file. */
extern int system_header_p;
/* True means global_bindings_p should return false even if the scope stack
says we are in file scope. */
extern bool c_override_global_bindings_to_false;
/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
/* In c-errors.c */
extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern void pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
#endif /* ! GCC_C_TREE_H */
// Test the placement of XOMP_init() in C/C++ input
#include <stdlib.h>
#include <stdio.h>
#include <omp.h> /* omp_get_num_threads() */
// Entry point for the XOMP_init placement test.
// NOTE(review): this file looks like a compiler-transformation test input
// (ROSE/XOMP); the statements are minimal on purpose.
int main(int argc, char* argv[])
{
    // Seed the drand48() family.  The original line read `srand48;`, which
    // merely names the function and has no effect; make it an actual call.
    srand48(0);

    // Require at least one command-line argument.
    if (argc < 2)
        exit(1);

    int nc = 0;  // NOTE(review): unused in the visible code; silenced below
    (void)nc;

    // Only the master thread of the parallel region prints the thread count.
#pragma omp parallel
#pragma omp master
    {
        printf("Number of threads = %d\n", omp_get_num_threads());
    }
    return 0;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qwang@openailab.com
*/
#include "conv_kernel_int8_arm.h"
#include "api/c_api.h"
#include "utility/sys_port.h"
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#ifdef __aarch64__
void i8gemm_4x16_a72_int8(int* biases, int8_t* input, int8_t* kernel, long kernel_size, int8_t* output,
int* multi, long output_xy, int* shift, int activation_min, int activation_max);
void i8gemm_4x4_a72_int8(int* biases, int8_t* input, int8_t* kernel, long kernel_size, int8_t* output,
int* multi, long output_xy, int* shift, int activation_min, int activation_max);
void im2col_int8_1x1(int8_t* input, long input_xy, int8_t* col, long col_cnt, long input_chan);
void im2col_int8_3x3(int8_t* input, long input_x, long input_y, long input_chan, int8_t* col, long stride);
// col_start and col_end need to be 16 aligned
// kernel_start need to be 4 aligned
//
// Drives the hand-written A72 assembly micro-kernel i8gemm_4x16_a72_int8 over
// the tile [kernel_start, kernel_end) x [col_start, col_end): each inner call
// computes 16 output channels x 4 output pixels.  Bias add and requantization
// (multi / q_shift) happen inside the asm kernel; the activation_min/max
// bounds are passed through (presumably for clamping there — confirm in asm).
// NOTE(review): cpu_affinity is accepted but unused in this function.
static void i8gemm4x16(int8_t* col, int8_t* kernel, bool bias_term, int* biases, int8_t* output, int* multi,
                       int kernel_size, int output_xy, int col_start, int col_end, int kernel_start, int kernel_end,
                       int activation_min, int activation_max, int* q_shift, int num_thread, int cpu_affinity)
{
    int col_end3 = col_end & 3;                        // 0..3 leftover output pixels after 4-wide tiles
    int kernel_size_aligned2 = (kernel_size + 1) & -2; // depth padded to even: asm consumes int8 pairs
    // One iteration per 16-channel slice; slices write disjoint output rows.
#pragma omp parallel for num_threads(num_thread)
    for(int kernel_num = (kernel_start & -16); kernel_num < (kernel_end & -16); kernel_num += 16)
    {
        int* cur_biases = NULL;
        if(bias_term)
        {
            cur_biases = biases + kernel_num;  // bias slice for these 16 channels (NULL = no bias)
        }
        int result[64] = {0};                  // 16x4 raw 32-bit results for the partial-column tail
        int8_t* output_line[4];
        int* pmulti = multi + kernel_num;      // per-channel requant multipliers
        int* pq_shift = q_shift + kernel_num;  // per-channel requant shifts
        int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2;
        int8_t* output_result = output + kernel_num * output_xy;
        // Full 4-pixel columns: asm stores int8 results directly, row stride output_xy.
        for(int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
        {
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x16_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, output_result + col_line, pmulti,
                                 output_xy, pq_shift, activation_min, activation_max);
        }
        // Tail of 1..3 pixels: call the asm with output_xy == 0, which apparently
        // makes it dump results as int32 into `result`; then scatter valid pixels.
        if(col_end3)
        {
            int col_line = col_end & -4;
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x16_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max);
            for(int i = 0; i < 4; i++)
            {
                for(int j = 0; j < 4; j++)
                {
                    output_line[j] = output + (kernel_num + i * 4 + j) * output_xy + col_line;
                }
                // The index patterns (0,5,10,15 / 4,1,14,11 / 8,13,2,7) mirror the asm
                // register layout — do not "simplify"; confirm against i8gemm_4x16_a72_int8.
                // Stores narrow int -> int8; values are assumed already clamped by the asm.
                *(output_line[0] + 0) = result[i * 16 + 0];
                *(output_line[1] + 0) = result[i * 16 + 5];
                *(output_line[2] + 0) = result[i * 16 + 10];
                *(output_line[3] + 0) = result[i * 16 + 15];
                if((col_end3) >= 2)
                {
                    *(output_line[0] + 1) = result[i * 16 + 4];
                    *(output_line[1] + 1) = result[i * 16 + 1];
                    *(output_line[2] + 1) = result[i * 16 + 14];
                    *(output_line[3] + 1) = result[i * 16 + 11];
                }
                if((col_end3) == 3)
                {
                    *(output_line[0] + 2) = result[i * 16 + 8];
                    *(output_line[1] + 2) = result[i * 16 + 13];
                    *(output_line[2] + 2) = result[i * 16 + 2];
                    *(output_line[3] + 2) = result[i * 16 + 7];
                }
            }
        }
    }
    return;
}
// col_start and kernel_start need to be 4 aligned
//
// 4x4 variant of i8gemm4x16 (see that function for the overall scheme):
// drives i8gemm_4x4_a72_int8 over [kernel_start, kernel_end) x [col_start,
// col_end), 4 output channels x 4 output pixels per call.  Unlike the 4x16
// driver it also handles a trailing 1..3-channel kernel slice, outside the
// parallel loop (single-threaded).
// NOTE(review): cpu_affinity is accepted but unused in this function.
static void i8gemm4x4(int8_t* col, int8_t* kernel, bool bias_term, int* biases, int8_t* output, int* multi,
                      int kernel_size, int output_xy, int col_start, int col_end, int kernel_start, int kernel_end,
                      int activation_min, int activation_max, int* q_shift, int num_thread, int cpu_affinity)
{
    int col_end3 = col_end & 3;                        // leftover output pixels (0..3)
    int kernel_end3 = kernel_end & 3;                  // leftover output channels (0..3)
    int kernel_size_aligned2 = (kernel_size + 1) & -2; // depth padded to even (asm reads pairs)
#pragma omp parallel for num_threads(num_thread)
    for(int kernel_num = kernel_start & -4; kernel_num < (kernel_end & -4); kernel_num += 4)
    {
        int* cur_biases = NULL;
        if(bias_term)
        {
            cur_biases = biases + kernel_num;
        }
        int result[16] = {0};                  // 4x4 int32 results for the column tail
        int8_t* output_line[4];
        int* pmulti = multi + kernel_num;
        int* pq_shift = q_shift + kernel_num;
        int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2;
        int8_t* output_result = output + kernel_num * output_xy;
        // Full 4-pixel columns: asm writes int8 output directly.
        for(int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
        {
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, output_result + col_line, pmulti,
                                output_xy, pq_shift, activation_min, activation_max);
        }
        // Column tail (output_xy == 0 => asm dumps int32 into `result`).
        if(col_end3)
        {
            int col_line = col_end & -4;
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max);
            for(int j = 0; j < 4; j++)
            {
                output_line[j] = output + (kernel_num + j) * output_xy + col_line;
            }
            // Scatter pattern follows the asm register layout — confirm against
            // i8gemm_4x4_a72_int8 before changing any index.
            *(output_line[0] + 0) = result[0];
            *(output_line[1] + 0) = result[5];
            *(output_line[2] + 0) = result[10];
            *(output_line[3] + 0) = result[15];
            if(col_end3 >= 2)
            {
                *(output_line[0] + 1) = result[4];
                *(output_line[1] + 1) = result[1];
                *(output_line[2] + 1) = result[14];
                *(output_line[3] + 1) = result[11];
            }
            if(col_end3 == 3)
            {
                *(output_line[0] + 2) = result[8];
                *(output_line[1] + 2) = result[13];
                *(output_line[2] + 2) = result[2];
                *(output_line[3] + 2) = result[7];
            }
        }
    }
    // Kernel tail: last 1..3 output channels.  Always goes through the int32
    // `result` staging buffer so only the valid channel rows get stored.
    if(kernel_end3)
    {
        int kernel_num = kernel_end & -4;
        int* cur_biases = NULL;
        if(bias_term)
        {
            cur_biases = biases + kernel_num;
        }
        int result[16] = {0};
        int8_t* output_line[4];
        int* pmulti = multi + kernel_num;
        int* pq_shift = q_shift + kernel_num;
        int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2;
        for(int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
        {
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max);
            // output_line[3] is computed but never dereferenced (at most 3 valid rows).
            for(int j = 0; j < 4; j++)
            {
                output_line[j] = output + (kernel_num + j) * output_xy + col_line;
            }
            *(output_line[0] + 0) = result[0];
            *(output_line[0] + 1) = result[4];
            *(output_line[0] + 2) = result[8];
            *(output_line[0] + 3) = result[12];
            if(kernel_end3 >= 2)
            {
                *(output_line[1] + 0) = result[5];
                *(output_line[1] + 1) = result[1];
                *(output_line[1] + 2) = result[13];
                *(output_line[1] + 3) = result[9];
            }
            if(kernel_end3 == 3)
            {
                *(output_line[2] + 0) = result[10];
                *(output_line[2] + 1) = result[14];
                *(output_line[2] + 2) = result[2];
                *(output_line[2] + 3) = result[6];
            }
        }
        // Corner: both kernel tail and column tail.
        if(col_end3)
        {
            int col_line = col_end & -4;
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max);
            for(int j = 0; j < 4; j++)
            {
                output_line[j] = output + (kernel_num + j) * output_xy + col_line;
            }
            *(output_line[0] + 0) = result[0];
            if(col_end3 >= 2)
                *(output_line[0] + 1) = result[4];
            if(col_end3 == 3)
                *(output_line[0] + 2) = result[8];
            if(kernel_end3 >= 2)
            {
                *(output_line[1] + 0) = result[5];
                if(col_end3 >= 2)
                    *(output_line[1] + 1) = result[1];
                if(col_end3 == 3)
                    *(output_line[1] + 2) = result[13];
            }
            if(kernel_end3 == 3)
            {
                *(output_line[2] + 0) = result[10];
                if(col_end3 >= 2)
                    *(output_line[2] + 1) = result[14];
                if(col_end3 == 3)
                    *(output_line[2] + 2) = result[2];
            }
        }
    }
    return;
}
#else
void i8gemm_4x4_a17_int8(int* biases, int8_t* input, int8_t* kernel, int kernel_size, int8_t* output,
int* multi, int output_xy, int* shift, int activation_min, int activation_max);
void i8gemm_4x8_a17_int8(int* biases, int8_t* input, int8_t* kernel, int kernel_size, int8_t* output,
int* multi, int output_xy, int* shift, int activation_min, int activation_max);
// col_start and col_end need to be 8 aligned kernel_start need to be 4 aligned
//
// 32-bit Arm (Cortex-A17) counterpart of the aarch64 i8gemm4x16 driver:
// 8 output channels x 4 output pixels per call to i8gemm_4x8_a17_int8.
// Same staging scheme: full 4-pixel columns are stored by the asm, the
// 1..3-pixel tail is dumped as int32 into `result` and scattered here.
// NOTE(review): cpu_affinity is accepted but unused in this function.
static void i8gemm4x8(int8_t* col, int8_t* kernel, bool bias_term, int* biases, int8_t* output, int* multi,
                      int kernel_size, int output_xy, int col_start, int col_end, int kernel_start, int kernel_end,
                      int activation_min, int activation_max, int* q_shift, int num_thread, int cpu_affinity)
{
    int col_end3 = col_end & 3;                        // leftover output pixels (0..3)
    int kernel_size_aligned2 = (kernel_size + 1) & -2; // depth padded to even
#pragma omp parallel for num_threads(num_thread)
    for(int kernel_num = (kernel_start & -8); kernel_num < (kernel_end & -8); kernel_num += 8)
    {
        int* cur_biases = NULL;
        if(bias_term)
        {
            cur_biases = biases + kernel_num;
        }
        int result[32] = {0};                  // 8x4 int32 results for the column tail
        int8_t* output_line[4];
        int* pmulti = multi + kernel_num;
        int* pq_shift = q_shift + kernel_num;
        int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2;
        int8_t* output_result = output + kernel_num * output_xy;
        for(int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
        {
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x8_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, output_result + col_line, pmulti,
                                output_xy, pq_shift, activation_min, activation_max);
        }
        if(col_end3)
        {
            int col_line = col_end & -4;
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x8_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max);
            // Two 4-channel groups of the 8-channel slice.
            for(int i = 0; i < 2; i++)
            {
                for(int j = 0; j < 4; j++)
                {
                    output_line[j] = output + (kernel_num + i * 4 + j) * output_xy + col_line;
                }
                // Scatter pattern mirrors the asm register layout — confirm
                // against i8gemm_4x8_a17_int8 before changing.
                *(output_line[0] + 0) = result[i * 16 + 0];
                *(output_line[1] + 0) = result[i * 16 + 5];
                *(output_line[2] + 0) = result[i * 16 + 10];
                *(output_line[3] + 0) = result[i * 16 + 15];
                if(col_end3 >= 2)
                {
                    *(output_line[0] + 1) = result[i * 16 + 4];
                    *(output_line[1] + 1) = result[i * 16 + 1];
                    *(output_line[2] + 1) = result[i * 16 + 14];
                    *(output_line[3] + 1) = result[i * 16 + 11];
                }
                if(col_end3 == 3)
                {
                    *(output_line[0] + 2) = result[i * 16 + 8];
                    *(output_line[1] + 2) = result[i * 16 + 13];
                    *(output_line[2] + 2) = result[i * 16 + 2];
                    *(output_line[3] + 2) = result[i * 16 + 7];
                }
            }
        }
    }
    return;
}
// col_start and kernel_start need to be 4 aligned
//
// 32-bit Arm (Cortex-A17) twin of the aarch64 i8gemm4x4: identical control
// flow and scatter patterns, but calls i8gemm_4x4_a17_int8.  4 channels x 4
// pixels per call; kernel tail (1..3 channels) handled single-threaded.
// NOTE(review): cpu_affinity is accepted but unused in this function.
static void i8gemm4x4(int8_t* col, int8_t* kernel, bool bias_term, int* biases, int8_t* output, int* multi,
                      int kernel_size, int output_xy, int col_start, int col_end, int kernel_start, int kernel_end,
                      int activation_min, int activation_max, int* q_shift, int num_thread, int cpu_affinity)
{
    int col_end3 = col_end & 3;                        // leftover output pixels (0..3)
    int kernel_end3 = kernel_end & 3;                  // leftover output channels (0..3)
    int kernel_size_aligned2 = (kernel_size + 1) & -2; // depth padded to even
#pragma omp parallel for num_threads(num_thread)
    for(int kernel_num = (kernel_start & -4); kernel_num < (kernel_end & -4); kernel_num += 4)
    {
        int* cur_biases = NULL;
        if(bias_term)
        {
            cur_biases = biases + kernel_num;
        }
        int result[16] = {0};                  // 4x4 int32 results for the column tail
        int8_t* output_line[4];
        int* pmulti = multi + kernel_num;
        int* pq_shift = q_shift + kernel_num;
        int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2;
        int8_t* output_result = output + kernel_num * output_xy;
        for(int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
        {
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, output_result + col_line, pmulti,
                                output_xy, pq_shift, activation_min, activation_max);
        }
        // Column tail (output_xy == 0 => asm dumps int32 into `result`).
        if(col_end3)
        {
            int col_line = col_end & -4;
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max);
            for(int j = 0; j < 4; j++)
            {
                output_line[j] = output + (kernel_num + j) * output_xy + col_line;
            }
            // Scatter pattern follows the asm register layout — confirm against
            // i8gemm_4x4_a17_int8 before changing any index.
            *(output_line[0] + 0) = result[0];
            *(output_line[1] + 0) = result[5];
            *(output_line[2] + 0) = result[10];
            *(output_line[3] + 0) = result[15];
            if(col_end3 >= 2)
            {
                *(output_line[0] + 1) = result[4];
                *(output_line[1] + 1) = result[1];
                *(output_line[2] + 1) = result[14];
                *(output_line[3] + 1) = result[11];
            }
            if(col_end3 == 3)
            {
                *(output_line[0] + 2) = result[8];
                *(output_line[1] + 2) = result[13];
                *(output_line[2] + 2) = result[2];
                *(output_line[3] + 2) = result[7];
            }
        }
    }
    // Kernel tail: last 1..3 output channels (single-threaded).
    if(kernel_end3)
    {
        int kernel_num = kernel_end & -4;
        int* cur_biases = NULL;
        if(bias_term)
        {
            cur_biases = biases + kernel_num;
        }
        int result[16] = {0};
        int8_t* output_line[4];
        int* pmulti = multi + kernel_num;
        int* pq_shift = q_shift + kernel_num;
        int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2;
        for(int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
        {
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max);
            // output_line[3] is computed but never dereferenced (at most 3 valid rows).
            for(int j = 0; j < 4; j++)
            {
                output_line[j] = output + (kernel_num + j) * output_xy + col_line;
            }
            *(output_line[0] + 0) = result[0];
            *(output_line[0] + 1) = result[4];
            *(output_line[0] + 2) = result[8];
            *(output_line[0] + 3) = result[12];
            if(kernel_end3 >= 2)
            {
                *(output_line[1] + 0) = result[5];
                *(output_line[1] + 1) = result[1];
                *(output_line[1] + 2) = result[13];
                *(output_line[1] + 3) = result[9];
            }
            if(kernel_end3 == 3)
            {
                *(output_line[2] + 0) = result[10];
                *(output_line[2] + 1) = result[14];
                *(output_line[2] + 2) = result[2];
                *(output_line[2] + 3) = result[6];
            }
        }
        // Corner: both kernel tail and column tail.
        if(col_end3)
        {
            int col_line = col_end & -4;
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max);
            for(int j = 0; j < 4; j++)
            {
                output_line[j] = output + (kernel_num + j) * output_xy + col_line;
            }
            *(output_line[0] + 0) = result[0];
            if(col_end3 >= 2)
                *(output_line[0] + 1) = result[4];
            if(col_end3 == 3)
                *(output_line[0] + 2) = result[8];
            if(kernel_end3 >= 2)
            {
                *(output_line[1] + 0) = result[5];
                if(col_end3 >= 2)
                    *(output_line[1] + 1) = result[1];
                if(col_end3 == 3)
                    *(output_line[1] + 2) = result[13];
            }
            if(kernel_end3 == 3)
            {
                *(output_line[2] + 0) = result[10];
                if(col_end3 >= 2)
                    *(output_line[2] + 1) = result[14];
                if(col_end3 == 3)
                    *(output_line[2] + 2) = result[2];
            }
        }
    }
    return;
}
#endif
/*
 * Size in bytes of the per-layer private buffer that holds the interleaved
 * (im2col + gemm) copy of the kernel tensor, covering all groups plus a
 * fixed amount of slack.
 */
static int get_private_mem_size(struct tensor* filter, struct conv_param* param)
{
    int groups = param->group;
    int chan_per_group = filter->dims[0] / groups;
    /* round output channels up to a multiple of 4 for the 4-wide gemm drivers */
    int chan_rounded4 = (chan_per_group + 3) / 4 * 4;
    /* elements per kernel: in_c * k_h * k_w */
    int per_kernel = filter->dims[1] * filter->dims[2] * filter->dims[3];
    /* +128 bytes of headroom ("caution" slack in the original) */
    return per_kernel * filter->elem_size * chan_rounded4 * groups + 128;
}
/* Register a caller-owned im2col scratch buffer with this conv instance.
   The external flag records that the buffer must not be freed by this
   module.  Always returns 0. */
int int8_conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
/* Pack4 counterpart of int8_conv_hcl_set_shared_mem.
   NOTE(review): unlike the function above, this one IGNORES `mem` and
   `mem_size` and clears the pack4 fields instead of storing them —
   presumably the int8 path never uses a pack4 im2col buffer; confirm
   against the fp32 implementation before treating this as a bug.
   Always returns 0. */
int int8_conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_pack4_mem = 0;
    priv_info->im2col_buffer_pack4 = NULL;
    priv_info->im2col_buffer_pack4_size = 0;
    return 0;
}
/* Size in bytes of the shared im2col buffer needed for one group:
   (kernel depth rounded up to even) x (output pixels rounded up to a
   multiple of 4) x element size, plus 128 bytes of slack.
   Fix: dropped the unused in_h/in_w locals (input->dims[2]/[3] were read
   but never used). */
int int8_conv_hcl_get_shared_mem_size(struct tensor* input, struct tensor* output, struct conv_param* param)
{
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int group = param->group;
    int input_chan = param->input_channel / group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int out_cstep = out_h * out_w;   // channel cstep, output_h * output_w
    int elem_size = input->elem_size; // uint8/int8 is 1 byte, fp32 is 4 bytes
    out_cstep = (out_cstep + 3) / 4 * 4;              // pad pixels to multiple of 4 (gemm tile width)
    int kernel_size_aligned2 = (kernel_size + 1) & -2; // pad depth to even (asm reads int8 pairs)
    int mem_size = elem_size * kernel_size_aligned2 * out_cstep + 128;
    return mem_size;
}
/* Interleave `kernel_chan` kernels of `kernel_size` int8 weights each into
   the layout consumed by the i8gemm assembly kernels: weights are emitted
   depth-pair by depth-pair across a block of kernels (16/4 on aarch64,
   8/4 on 32-bit Arm), with zero padding for an odd kernel_size and for
   missing kernels in the final partial block.  The exact emission order is
   the contract with the asm — do not reorder.
   Caller must size kernel_int8 for the padded layout (see get_private_mem_size). */
void interleave_kernel_int8(int8_t* kernel, int8_t* kernel_int8, int kernel_chan, int kernel_size)
{
#ifdef __aarch64__
    int8_t* cur_kernel[16];
    int8_t* cur_kernel_int8 = kernel_int8;
    int i, j, k;
    // interleave 16 kernels: emit pairs (j, j+1) of each of 16 kernels in turn
    for(i = 0; i < (kernel_chan & -16); i += 16)
    {
        for(j = 0; j < 16; j++)
            cur_kernel[j] = kernel + kernel_size * (i + j);
        for(j = 0; j < (kernel_size & -2); j += 2)
            for(k = 0; k < 16; k++)
            {
                *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1);
            }
        // odd kernel_size: pad the last depth element's partner with 0
        if(kernel_size & 0x1)
            for(k = 0; k < 16; k++)
            {
                *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                *(cur_kernel_int8++) = 0;
            }
    }
    // interleave 4 kernels (remaining full groups of 4)
    for(i = (kernel_chan & -16); i < (kernel_chan & -4); i += 4)
    {
        for(j = 0; j < 4; j++)
            cur_kernel[j] = kernel + kernel_size * (i + j);
        for(j = 0; j < (kernel_size & -2); j += 2)
            for(k = 0; k < 4; k++)
            {
                *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1);
            }
        if(kernel_size & 0x1)
            for(k = 0; k < 4; k++)
            {
                *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                *(cur_kernel_int8++) = 0;
            }
    }
    // last 4 kernels: 1..3 real kernels, zero-padded to a full group.
    // Each depth pair still emits 8 bytes (4 kernels x 2).
    if((kernel_chan & 0x3) != 0)
    {
        // NOTE(review): always computes 3 pointers even when only 1 or 2
        // kernels remain; the extras are never dereferenced, but the
        // pointer arithmetic may point past the tensor — confirm harmless.
        for(j = 0; j < 3; j++)
            cur_kernel[j] = kernel + kernel_size * (i + j);
        if((kernel_chan & 0x3) == 3)
        {
            for(j = 0; j < (kernel_size & -2); j += 2)
            {
                for(k = 0; k < 3; k++)
                {
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1);
                }
                for(k = 0; k < 2; k++)    // pad missing 4th kernel
                    *(cur_kernel_int8++) = 0;
            }
            if(kernel_size & 0x1)
            {
                for(k = 0; k < 3; k++)
                {
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                    *(cur_kernel_int8++) = 0;
                }
                for(k = 0; k < 2; k++)
                    *(cur_kernel_int8++) = 0;
            }
        }
        else if((kernel_chan & 0x3) == 2)
        {
            for(j = 0; j < (kernel_size & -2); j += 2)
            {
                for(k = 0; k < 2; k++)
                {
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1);
                }
                for(k = 0; k < 4; k++)    // pad missing kernels 3 and 4
                    *(cur_kernel_int8++) = 0;
            }
            if(kernel_size & 0x1)
            {
                for(k = 0; k < 2; k++)
                {
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                    *(cur_kernel_int8++) = 0;
                }
                for(k = 0; k < 4; k++)
                    *(cur_kernel_int8++) = 0;
            }
        }
        else if((kernel_chan & 0x3) == 1)
        {
            for(j = 0; j < (kernel_size & -2); j += 2)
            {
                *(cur_kernel_int8++) = *(cur_kernel[0] + j);
                *(cur_kernel_int8++) = *(cur_kernel[0] + j + 1);
                for(k = 0; k < 6; k++)    // pad missing kernels 2..4
                    *(cur_kernel_int8++) = 0;
            }
            if(kernel_size & 0x1)
            {
                *(cur_kernel_int8++) = *(cur_kernel[0] + j);
                for(k = 0; k < 7; k++)
                    *(cur_kernel_int8++) = 0;
            }
        }
    }
#else
    /* 32-bit Arm: same scheme with 8-wide then 4-wide groups. */
    int8_t* cur_kernel[8];
    int8_t* cur_kernel_int8 = kernel_int8;
    int i, j, k;
    int kernel_chan3 = kernel_chan & 0x3;   // leftover kernels (0..3)
    int kernel_size1 = kernel_size & 0x1;   // odd depth needs a zero partner
    // interleave 8 kernels
    for(i = 0; i < (kernel_chan & -8); i += 8)
    {
        for(j = 0; j < 8; j++)
            cur_kernel[j] = kernel + kernel_size * (i + j);
        for(j = 0; j < (kernel_size & -2); j += 2)
            for(k = 0; k < 8; k++)
            {
                *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1);
            }
        if(kernel_size1)
            for(k = 0; k < 8; k++)
            {
                *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                *(cur_kernel_int8++) = 0;
            }
    }
    // interleave 4 kernels
    for(; i < (kernel_chan & -4); i += 4)
    {
        for(j = 0; j < 4; j++)
            cur_kernel[j] = kernel + kernel_size * (i + j);
        for(j = 0; j < (kernel_size & -2); j += 2)
            for(k = 0; k < 4; k++)
            {
                *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1);
            }
        if(kernel_size1)
            for(k = 0; k < 4; k++)
            {
                *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                *(cur_kernel_int8++) = 0;
            }
    }
    // last 4 kernels: 1..3 real kernels, zero-padded to a full group
    if(kernel_chan3)
    {
        // NOTE(review): same speculative 3-pointer setup as the aarch64 path.
        for(j = 0; j < 3; j++)
            cur_kernel[j] = kernel + kernel_size * (i + j);
        if((kernel_chan3) == 3)
        {
            for(j = 0; j < (kernel_size & -2); j += 2)
            {
                for(k = 0; k < 3; k++)
                {
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1);
                }
                for(k = 0; k < 2; k++)
                    *(cur_kernel_int8++) = 0;
            }
            if(kernel_size1)
            {
                for(k = 0; k < 3; k++)
                {
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                    *(cur_kernel_int8++) = 0;
                }
                for(k = 0; k < 2; k++)
                    *(cur_kernel_int8++) = 0;
            }
        }
        else if((kernel_chan3) == 2)
        {
            for(j = 0; j < (kernel_size & -2); j += 2)
            {
                for(k = 0; k < 2; k++)
                {
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1);
                }
                for(k = 0; k < 4; k++)
                    *(cur_kernel_int8++) = 0;
            }
            if(kernel_size1)
            {
                for(k = 0; k < 2; k++)
                {
                    *(cur_kernel_int8++) = *(cur_kernel[k] + j);
                    *(cur_kernel_int8++) = 0;
                }
                for(k = 0; k < 4; k++)
                    *(cur_kernel_int8++) = 0;
            }
        }
        else
        { // kernel_chan & 0x3 == 1
            for(j = 0; j < (kernel_size & -2); j += 2)
            {
                *(cur_kernel_int8++) = *(cur_kernel[0] + j);
                *(cur_kernel_int8++) = *(cur_kernel[0] + j + 1);
                for(k = 0; k < 6; k++)
                    *(cur_kernel_int8++) = 0;
            }
            if(kernel_size1)
            {
                *(cur_kernel_int8++) = *(cur_kernel[0] + j);
                for(k = 0; k < 7; k++)
                    *(cur_kernel_int8++) = 0;
            }
        }
    }
#endif
    return;
}
/* Interleave the grouped kernel tensor into priv_info->interleave_buffer,
   one group at a time, in the layout expected by the int8 gemm kernels.
   The destination stride per group uses the channel count rounded up to a
   multiple of 4, matching the padded layout written by interleave_kernel_int8. */
static void interleave_int8(struct tensor* filter, struct conv_priv_info* priv_info, struct conv_param* param)
{
    int group_count = param->group;
    int ksize = filter->dims[1] * filter->dims[2] * filter->dims[3];
    int chan_per_group = filter->dims[0] / group_count;
    int chan_rounded4 = (chan_per_group + 3) / 4 * 4;
    int dst_stride = ksize * chan_rounded4;  /* per-group size in the interleaved buffer */
    int src_stride = ksize * chan_per_group; /* per-group size in the raw filter */
    int8_t* src = filter->data;
    int8_t* dst = priv_info->interleave_buffer;
    int g = 0;
    while (g < group_count)
    {
        interleave_kernel_int8(src + g * src_stride, dst + g * dst_stride, chan_per_group, ksize);
        g++;
    }
}
// im2col_int8: expand an int8 input image into the column buffer consumed by
// the int8 GEMM kernels. The image `im` is laid out channel-major: input_chan
// planes of input_y rows by input_x columns. For every group of 4 output
// positions the kernel elements are written in interleaved pairs (2 kernel
// taps x 4 columns), matching kernel_size_aligned2 = kernel_size rounded up
// to even; a dangling odd kernel tap is padded with a zero byte.
// Out-of-image taps (padding region) are written as 0.
// Fast paths: 1x1/stride1 (aarch64 only, asm helper), 3x3/dilation1
// (asm helper on aarch64, fully unrolled scalar copy on 32-bit), then a
// generic kernel<4 path and a generic large-kernel path.
// NOTE(review): assumes col has room for ((col_end+3)&-4)*kernel_size_aligned2
// bytes — allocated by int8_conv_hcl_get_shared_mem_size; verify against caller.
static void im2col_int8(int8_t* im, int8_t* col, int input_chan, int input_x, int input_y, int kernel_x, int kernel_y, int stride_x, int stride_y, int dilation_x,
int dilation_y, int pad_x0, int pad_x1, int pad_y0, int pad_y1, int output_x, int output_y, int num_thread)
{
// Column range covered: all output_x*output_y output positions.
int col_start = 0;
int col_end = output_x * output_y;
int kernel_xy = kernel_x * kernel_y;
int kernel_size = kernel_xy * input_chan;
// Kernel taps per column, rounded up to an even count (pair interleave).
int kernel_size_aligned2 = (kernel_size + 1) & -2;
int input_xy = input_x * input_y;
// col_end3: number of leftover output positions after the 4-wide main loop.
int col_end3 = col_end & 0x3;
// kernel_size1: 1 when the tap count is odd (one zero-padded trailing tap).
int kernel_size1 = kernel_size & 0x1;
int is_1x1 = (kernel_x == 1) && (kernel_y == 1) && (stride_x == 1) && (stride_y == 1);
int is_3x3 = (kernel_x == 3) && (kernel_y == 3) && (dilation_x == 1) && (dilation_y == 1);
bool is_pad0 = (pad_x0 == 0) && (pad_y0 == 0) && (pad_x1 == 0) && (pad_y1 == 0);
#ifdef __aarch64__
// is 1x1
if(is_1x1)
{
// Bulk of the columns handled by the asm helper; it consumes whole
// groups of 4 consecutive output positions.
int8_t* cur_col = col + col_start * kernel_size_aligned2;
int col_cnt = (col_end & -4) - (col_start & -4);
im2col_int8_1x1(( int8_t* )im + col_start, input_xy, cur_col, col_cnt, kernel_size);
cur_col += col_cnt * kernel_size_aligned2;
int col_i = col_end & -4;
// final 4 input
if(col_end3)
{
// Scalar tail: fill a full 4-wide group, zeroing positions past col_end.
for(int kch = 0; kch < (kernel_size & -2); kch += 2)
{
for(int i = 0; i < 4; i++)
{
if((col_i + i) < col_end)
{
*cur_col++ = *(im + input_xy * (kch + 0) + col_i + i);
*cur_col++ = *(im + input_xy * (kch + 1) + col_i + i);
}
else
{
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
int kch = kernel_size & -2;
if(kernel_size1)
{
// Odd trailing channel: real value paired with a zero byte.
for(int i = 0; i < 4; i++)
{
if((col_i + i) < col_end)
{
*cur_col++ = *(im + input_xy * (kch + 0) + col_i + i);
*cur_col++ = 0;
}
else
{
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
}
}
// 3x3 non dilation
else if(is_3x3)
{
#pragma omp parallel for num_threads(num_thread)
for(int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
{
int imx[4] = {0};
int imy[4] = {0};
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
int8_t* cur_col = col + col_i * kernel_size_aligned2;
// Map each of the 4 output positions to its top-left image coordinate.
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
// Fast path only when the 4 positions share a row and no tap can
// fall into the padding region.
if((cnt_y[0] == cnt_y[3]) &&
(is_pad0 || (cnt_y[0] > 0 && cnt_x[0] > 0 && cnt_y[0] < (output_y - 1) && cnt_x[3] < (output_x - 1))))
{
int8_t* input_start = ( int8_t* )(im + imy_start[0] * input_x + imx_start[0]);
im2col_int8_3x3(input_start, input_x, input_y, input_chan, cur_col, stride_x);
cur_col += 4 * kernel_size_aligned2;
}
else
{
// Slow path with bounds checks. The 9 taps per channel are an odd
// count, so taps are emitted in pairs that straddle row/channel
// boundaries: `odd_line` remembers a deferred 3rd-column tap from
// the previous row (kchp/kyp) to pair with the next row's 1st tap.
bool odd_line = false;
int kchp = 0;
int kyp = 0;
for(int kch = 0; kch < input_chan; kch++)
{
for(int ky = 0; ky < 3; ky++)
{
if(odd_line)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp;
imx[i] = imx_start[i] + 2;
if(imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
imy[i] = imy_start[i] + ky;
if(imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]);
else
*cur_col++ = 0;
}
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + 1 + k;
if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
odd_line = false;
}
// even line 2n
else
{
for(int i = 0; i < 4; i++)
imy[i] = imy_start[i] + ky;
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + k;
if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
kchp = kch;
kyp = ky;
odd_line = true;
}
}
}
if(kernel_size1)
{
// Odd total tap count: flush the deferred tap, zero-pad its pair.
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp;
imx[i] = imx_start[i] + 2;
if(imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
}
// Tail group of <4 output positions: same pairing logic, with an extra
// (i < col_end3) guard zeroing lanes past the last real column.
int col_i = col_end & -4;
if(col_end3)
{
int imx[4] = {0};
int imy[4] = {0};
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
int8_t* cur_col = col + col_i * kernel_size_aligned2;
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
bool odd_line = false;
int kchp = 0;
int kyp = 0;
for(int kch = 0; kch < input_chan; kch++)
{
for(int ky = 0; ky < 3; ky++)
{
// odd line 1 + 2n
if(odd_line)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp;
imx[i] = imx_start[i] + 2;
if((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
imy[i] = imy_start[i] + ky;
if((i < col_end3) && imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]);
else
*cur_col++ = 0;
}
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (1 + k);
if((i < col_end3) && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
odd_line = false;
}
// even line 2n + 1
else
{
for(int i = 0; i < 4; i++)
imy[i] = imy_start[i] + ky;
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + k;
if(i < col_end3 && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
kchp = kch;
kyp = ky;
odd_line = true;
}
}
}
if(kernel_size1)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp;
imx[i] = imx_start[i] + 2;
if((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
}
// general case for kernel size <=3
else if((kernel_x) < 4 && (kernel_y < 4))
{
// Generic path: decode each flattened tap index col_j into
// (channel, ky, kx), apply dilation, bounds-check every load.
int kch[2], kx[2], ky[2], imx[4][2], imy[4][2];
int8_t* cur_col = col + col_start * kernel_size_aligned2;
for(int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
{
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
for(int col_j = 0; col_j < (kernel_size & -2); col_j += 2)
{
for(int k = 0; k < 2; k++)
{
kch[k] = (col_j + k) / kernel_xy;
ky[k] = (col_j + k - kch[k] * kernel_xy) / kernel_x;
kx[k] = (col_j + k - kch[k] * kernel_xy) - ky[k] * kernel_x;
ky[k] = ky[k] * dilation_y;
kx[k] = kx[k] * dilation_x;
for(int i = 0; i < 4; i++)
{
imx[i][k] = imx_start[i] + kx[k];
imy[i][k] = imy_start[i] + ky[k];
}
}
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
if(imx[i][k] >= 0 && imx[i][k] < input_x && imy[i][k] >= 0 && imy[i][k] < input_y)
*cur_col++ = *(im + input_xy * kch[k] + input_x * imy[i][k] + imx[i][k]);
else
*cur_col++ = 0;
}
}
}
int col_j = kernel_size & -2;
if(kernel_size1)
{
kch[0] = col_j / kernel_xy;
ky[0] = (col_j - kch[0] * kernel_xy) / kernel_x;
kx[0] = col_j - kch[0] * kernel_xy - ky[0] * kernel_x;
ky[0] = ky[0] * dilation_y;
kx[0] = kx[0] * dilation_x;
for(int i = 0; i < 4; i++)
{
imx[i][0] = imx_start[i] + kx[0];
imy[i][0] = imy_start[i] + ky[0];
if(imx[i][0] >= 0 && imx[i][0] < input_x && imy[i][0] >= 0 && imy[i][0] < input_y)
*cur_col++ = *(im + input_xy * kch[0] + input_x * imy[i][0] + imx[i][0]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
int col_i = col_end & -4;
// final 4 input
if(col_end3)
{
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
for(int col_j = 0; col_j < (kernel_size & -2); col_j += 2)
{
for(int k = 0; k < 2; k++)
{
kch[k] = (col_j + k) / kernel_xy;
ky[k] = (col_j + k - kch[k] * kernel_xy) / kernel_x;
kx[k] = (col_j + k - kch[k] * kernel_xy) - ky[k] * kernel_x;
ky[k] = ky[k] * dilation_y;
kx[k] = kx[k] * dilation_x;
for(int i = 0; i < 4; i++)
{
imx[i][k] = imx_start[i] + kx[k];
imy[i][k] = imy_start[i] + ky[k];
}
}
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
if((col_i + i) < col_end && imx[i][k] >= 0 && imx[i][k] < input_x && imy[i][k] >= 0 &&
imy[i][k] < input_y)
*cur_col++ = *(im + input_xy * kch[k] + input_x * imy[i][k] + imx[i][k]);
else
*cur_col++ = 0;
}
}
}
int col_j = kernel_size & -2;
if(kernel_size1)
{
kch[0] = col_j / kernel_xy;
ky[0] = (col_j - kch[0] * kernel_xy) / kernel_x;
kx[0] = col_j - kch[0] * kernel_xy - ky[0] * kernel_x;
ky[0] = ky[0] * dilation_y;
kx[0] = kx[0] * dilation_x;
for(int i = 0; i < 4; i++)
{
imx[i][0] = imx_start[i] + kx[0];
imy[i][0] = imy_start[i] + ky[0];
if((col_i + i) < col_end && imx[i][0] >= 0 && imx[i][0] < input_x && imy[i][0] >= 0 && imy[i][0] < input_y)
*cur_col++ = *(im + input_xy * kch[0] + input_x * imy[i][0] + imx[i][0]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
}
// general case for kernel size >=3
else
{
// Large-kernel path: rows are paired two taps at a time; when kernel_x
// is odd the last tap of a row is deferred (kchp/kyp) and paired with
// the first tap of the next row, same odd_line trick as the 3x3 path.
int kch, kx, ky, kchp, kyp, imx[4], imy[4] = {0};
int kernel_x1 = kernel_x & 0x1;
int8_t* cur_col = col + col_start * kernel_size_aligned2;
for(int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
{
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
bool odd_line = false;
kchp = 0;
kyp = 0;
for(int kch = 0; kch < input_chan; kch++)
{
for(ky = 0; ky < kernel_y; ky++)
{
// odd line 2 + 2n
if(odd_line)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp * dilation_y;
imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x;
if(imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
imy[i] = imy_start[i] + ky * dilation_y;
if(imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]);
else
*cur_col++ = 0;
}
for(kx = 1; kx < kernel_x; kx += 2)
{
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (kx + k) * dilation_x;
if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
}
odd_line = false;
}
// even line 2n
else
{
for(int i = 0; i < 4; i++)
imy[i] = imy_start[i] + ky * dilation_y;
for(kx = 0; kx < (kernel_x - 1); kx += 2)
{
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (kx + k) * dilation_x;
if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
}
kchp = kch;
kyp = ky;
odd_line = kernel_x1 ? true : false;
}
}
}
if(kernel_size1)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp * dilation_y;
imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x;
if(imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
int col_i = col_end & -4;
// final 4 input
if(col_end3)
{
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
bool odd_line = false;
kchp = 0;
kyp = 0;
for(int kch = 0; kch < input_chan; kch++)
{
for(ky = 0; ky < kernel_y; ky++)
{
// odd line 1 + 2n
if(odd_line)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp * dilation_y;
imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x;
if((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
imy[i] = imy_start[i] + ky * dilation_y;
if((i < col_end3) && imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]);
else
*cur_col++ = 0;
}
for(kx = 1; kx < kernel_x; kx += 2)
{
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (kx + k) * dilation_x;
if((i < col_end3) && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 &&
imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
}
odd_line = false;
}
// even line 2n + 1
else
{
for(int i = 0; i < 4; i++)
imy[i] = imy_start[i] + ky * dilation_y;
for(kx = 0; kx < (kernel_x - 1); kx += 2)
{
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (kx + k) * dilation_x;
if(i < col_end3 && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 &&
imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
}
kchp = kch;
kyp = ky;
odd_line = kernel_x1 ? true : false;
}
}
}
if(kernel_size1)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp * dilation_y;
imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x;
if((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
}
#else
// 32-bit ARM path: no asm 1x1 helper; the 3x3 fast case is a fully
// unrolled scalar copy (2 channels x 9 taps x 4 columns = 72 bytes/iter).
if(is_3x3)
{
int stride_x2 = stride_x * 2;
int stride_x3 = stride_x * 3;
// #pragma omp parallel for num_threads(num_thread)
for(int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
{
int imx[4] = {0};
int imy[4] = {0};
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
int8_t* cur_col = col + col_i * kernel_size_aligned2;
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
if((cnt_y[0] == cnt_y[3]) &&
(is_pad0 || (cnt_y[0] > 0 && cnt_x[0] > 0 && cnt_y[0] < (output_y - 1) && cnt_x[3] < (output_x - 1))))
{
// l0x: rows 0..2 of the current channel, l1x: rows of the next
// channel — two channels are emitted per unrolled iteration.
int8_t* l00 = ( int8_t* )(im + imy_start[0] * input_x + imx_start[0]);
int8_t* l01 = l00 + input_x;
int8_t* l02 = l00 + input_x * 2;
int8_t* l10 = l00 + input_xy;
int8_t* l11 = l10 + input_x;
int8_t* l12 = l10 + input_x * 2;
for(int kch = 0; kch < (input_chan & -2); kch += 2)
{
cur_col[0] = l00[0];
cur_col[1] = l00[1];
cur_col[2] = l00[0 + stride_x];
cur_col[3] = l00[1 + stride_x];
cur_col[4] = l00[0 + stride_x2];
cur_col[5] = l00[1 + stride_x2];
cur_col[6] = l00[0 + stride_x3];
cur_col[7] = l00[1 + stride_x3];
cur_col[8] = l00[2];
cur_col[9] = l01[0];
cur_col[10] = l00[2 + stride_x];
cur_col[11] = l01[0 + stride_x];
cur_col[12] = l00[2 + stride_x2];
cur_col[13] = l01[0 + stride_x2];
cur_col[14] = l00[2 + stride_x3];
cur_col[15] = l01[0 + stride_x3];
cur_col[16] = l01[1];
cur_col[17] = l01[2];
cur_col[18] = l01[1 + stride_x];
cur_col[19] = l01[2 + stride_x];
cur_col[20] = l01[1 + stride_x2];
cur_col[21] = l01[2 + stride_x2];
cur_col[22] = l01[1 + stride_x3];
cur_col[23] = l01[2 + stride_x3];
cur_col[24] = l02[0];
cur_col[25] = l02[1];
cur_col[26] = l02[0 + stride_x];
cur_col[27] = l02[1 + stride_x];
cur_col[28] = l02[0 + stride_x2];
cur_col[29] = l02[1 + stride_x2];
cur_col[30] = l02[0 + stride_x3];
cur_col[31] = l02[1 + stride_x3];
cur_col[32] = l02[2];
cur_col[33] = l10[0];
cur_col[34] = l02[2 + stride_x];
cur_col[35] = l10[0 + stride_x];
cur_col[36] = l02[2 + stride_x2];
cur_col[37] = l10[0 + stride_x2];
cur_col[38] = l02[2 + stride_x3];
cur_col[39] = l10[0 + stride_x3];
cur_col[40] = l10[1];
cur_col[41] = l10[2];
cur_col[42] = l10[1 + stride_x];
cur_col[43] = l10[2 + stride_x];
cur_col[44] = l10[1 + stride_x2];
cur_col[45] = l10[2 + stride_x2];
cur_col[46] = l10[1 + stride_x3];
cur_col[47] = l10[2 + stride_x3];
cur_col[48] = l11[0];
cur_col[49] = l11[1];
cur_col[50] = l11[0 + stride_x];
cur_col[51] = l11[1 + stride_x];
cur_col[52] = l11[0 + stride_x2];
cur_col[53] = l11[1 + stride_x2];
cur_col[54] = l11[0 + stride_x3];
cur_col[55] = l11[1 + stride_x3];
cur_col[56] = l11[2];
cur_col[57] = l12[0];
cur_col[58] = l11[2 + stride_x];
cur_col[59] = l12[0 + stride_x];
cur_col[60] = l11[2 + stride_x2];
cur_col[61] = l12[0 + stride_x2];
cur_col[62] = l11[2 + stride_x3];
cur_col[63] = l12[0 + stride_x3];
cur_col[64] = l12[1];
cur_col[65] = l12[2];
cur_col[66] = l12[1 + stride_x];
cur_col[67] = l12[2 + stride_x];
cur_col[68] = l12[1 + stride_x2];
cur_col[69] = l12[2 + stride_x2];
cur_col[70] = l12[1 + stride_x3];
cur_col[71] = l12[2 + stride_x3];
cur_col += 72;
l00 += input_xy * 2;
l01 += input_xy * 2;
l02 += input_xy * 2;
l10 += input_xy * 2;
l11 += input_xy * 2;
l12 += input_xy * 2;
}
if(input_chan & 0x1)
{
// Odd channel count: last channel's 9 taps, zero-paired at the end.
cur_col[0] = l00[0];
cur_col[1] = l00[1];
cur_col[2] = l00[0 + stride_x];
cur_col[3] = l00[1 + stride_x];
cur_col[4] = l00[0 + stride_x2];
cur_col[5] = l00[1 + stride_x2];
cur_col[6] = l00[0 + stride_x3];
cur_col[7] = l00[1 + stride_x3];
cur_col[8] = l00[2];
cur_col[9] = l01[0];
cur_col[10] = l00[2 + stride_x];
cur_col[11] = l01[0 + stride_x];
cur_col[12] = l00[2 + stride_x2];
cur_col[13] = l01[0 + stride_x2];
cur_col[14] = l00[2 + stride_x3];
cur_col[15] = l01[0 + stride_x3];
cur_col[16] = l01[1];
cur_col[17] = l01[2];
cur_col[18] = l01[1 + stride_x];
cur_col[19] = l01[2 + stride_x];
cur_col[20] = l01[1 + stride_x2];
cur_col[21] = l01[2 + stride_x2];
cur_col[22] = l01[1 + stride_x3];
cur_col[23] = l01[2 + stride_x3];
cur_col[24] = l02[0];
cur_col[25] = l02[1];
cur_col[26] = l02[0 + stride_x];
cur_col[27] = l02[1 + stride_x];
cur_col[28] = l02[0 + stride_x2];
cur_col[29] = l02[1 + stride_x2];
cur_col[30] = l02[0 + stride_x3];
cur_col[31] = l02[1 + stride_x3];
cur_col[32] = l02[2];
cur_col[33] = 0;
cur_col[34] = l02[2 + stride_x];
cur_col[35] = 0;
cur_col[36] = l02[2 + stride_x2];
cur_col[37] = 0;
cur_col[38] = l02[2 + stride_x3];
cur_col[39] = 0;
}
}
else
{
// Bounds-checked slow path; see the aarch64 3x3 branch for the
// odd_line pairing scheme.
bool odd_line = false;
int kchp = 0;
int kyp = 0;
for(int kch = 0; kch < input_chan; kch++)
{
for(int ky = 0; ky < 3; ky++)
{
if(odd_line)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp;
imx[i] = imx_start[i] + 2;
if(imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
imy[i] = imy_start[i] + ky;
if(imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]);
else
*cur_col++ = 0;
}
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + 1 + k;
if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
odd_line = false;
}
// even line 2n
else
{
for(int i = 0; i < 4; i++)
imy[i] = imy_start[i] + ky;
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + k;
if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
kchp = kch;
kyp = ky;
odd_line = true;
}
}
}
if(kernel_size1)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp;
imx[i] = imx_start[i] + 2;
if(imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
}
int col_i = col_end & -4;
if(col_end3)
{
// Tail group of <4 output positions (lanes past col_end zeroed).
int imx[4] = {0};
int imy[4] = {0};
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
int8_t* cur_col = col + col_i * kernel_size_aligned2;
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
bool odd_line = false;
int kchp = 0;
int kyp = 0;
for(int kch = 0; kch < input_chan; kch++)
{
for(int ky = 0; ky < 3; ky++)
{
// odd line 1 + 2n
if(odd_line)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp;
imx[i] = imx_start[i] + 2;
if((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
imy[i] = imy_start[i] + ky;
if((i < col_end3) && imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]);
else
*cur_col++ = 0;
}
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (1 + k);
if((i < col_end3) && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
odd_line = false;
}
// even line 2n + 1
else
{
for(int i = 0; i < 4; i++)
imy[i] = imy_start[i] + ky;
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + k;
if(i < col_end3 && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
kchp = kch;
kyp = ky;
odd_line = true;
}
}
}
if(kernel_size1)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp;
imx[i] = imx_start[i] + 2;
if((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
}
// general case for kernel size <=3
else if((kernel_x) < 4 && (kernel_y < 4))
{
// Same tap-index decode scheme as the aarch64 generic path.
int kch[2], kx[2], ky[2], imx[4][2], imy[4][2];
for(int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
{
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
int8_t* cur_col = col + col_i * kernel_size_aligned2;
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
for(int col_j = 0; col_j < (kernel_size & -2); col_j += 2)
{
for(int k = 0; k < 2; k++)
{
kch[k] = (col_j + k) / kernel_xy;
ky[k] = (col_j + k - kch[k] * kernel_xy) / kernel_x;
kx[k] = (col_j + k - kch[k] * kernel_xy) - ky[k] * kernel_x;
ky[k] = ky[k] * dilation_y;
kx[k] = kx[k] * dilation_x;
for(int i = 0; i < 4; i++)
{
imx[i][k] = imx_start[i] + kx[k];
imy[i][k] = imy_start[i] + ky[k];
}
}
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
if(imx[i][k] >= 0 && imx[i][k] < input_x && imy[i][k] >= 0 && imy[i][k] < input_y)
*cur_col++ = *(im + input_xy * kch[k] + input_x * imy[i][k] + imx[i][k]);
else
*cur_col++ = 0;
}
}
}
int col_j = kernel_size & -2;
if(kernel_size1)
{
kch[0] = col_j / kernel_xy;
ky[0] = (col_j - kch[0] * kernel_xy) / kernel_x;
kx[0] = col_j - kch[0] * kernel_xy - ky[0] * kernel_x;
ky[0] = ky[0] * dilation_y;
kx[0] = kx[0] * dilation_x;
for(int i = 0; i < 4; i++)
{
imx[i][0] = imx_start[i] + kx[0];
imy[i][0] = imy_start[i] + ky[0];
if(imx[i][0] >= 0 && imx[i][0] < input_x && imy[i][0] >= 0 && imy[i][0] < input_y)
*cur_col++ = *(im + input_xy * kch[0] + input_x * imy[i][0] + imx[i][0]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
int col_i = col_end & -4;
// final 4 input
if(col_end3)
{
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
int8_t* cur_col = col + col_i * kernel_size_aligned2;
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
for(int col_j = 0; col_j < (kernel_size & -2); col_j += 2)
{
for(int k = 0; k < 2; k++)
{
kch[k] = (col_j + k) / kernel_xy;
ky[k] = (col_j + k - kch[k] * kernel_xy) / kernel_x;
kx[k] = (col_j + k - kch[k] * kernel_xy) - ky[k] * kernel_x;
ky[k] = ky[k] * dilation_y;
kx[k] = kx[k] * dilation_x;
for(int i = 0; i < 4; i++)
{
imx[i][k] = imx_start[i] + kx[k];
imy[i][k] = imy_start[i] + ky[k];
}
}
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
if((col_i + i) < col_end && imx[i][k] >= 0 && imx[i][k] < input_x && imy[i][k] >= 0 &&
imy[i][k] < input_y)
*cur_col++ = *(im + input_xy * kch[k] + input_x * imy[i][k] + imx[i][k]);
else
*cur_col++ = 0;
}
}
}
int col_j = kernel_size & -2;
if(kernel_size1)
{
kch[0] = col_j / kernel_xy;
ky[0] = (col_j - kch[0] * kernel_xy) / kernel_x;
kx[0] = col_j - kch[0] * kernel_xy - ky[0] * kernel_x;
ky[0] = ky[0] * dilation_y;
kx[0] = kx[0] * dilation_x;
for(int i = 0; i < 4; i++)
{
imx[i][0] = imx_start[i] + kx[0];
imy[i][0] = imy_start[i] + ky[0];
if((col_i + i) < col_end && imx[i][0] >= 0 && imx[i][0] < input_x && imy[i][0] >= 0 &&
imy[i][0] < input_y)
*cur_col++ = *(im + input_xy * kch[0] + input_x * imy[i][0] + imx[i][0]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
}
// general case for kernel size >=3
else
{
// Large-kernel path, same odd_line deferred-tap pairing as aarch64.
int kch, kx, ky, kchp, kyp, imx[4], imy[4];
int kernel_x1 = kernel_x & 0x1;
int8_t* cur_col = col + col_start * kernel_size_aligned2;
for(int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
{
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
bool odd_line = false;
kchp = 0;
kyp = 0;
for(int kch = 0; kch < input_chan; kch++)
{
for(int ky = 0; ky < kernel_y; ky++)
{
// odd line 2 + 2n
if(odd_line)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp * dilation_y;
imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x;
if(imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
imy[i] = imy_start[i] + ky * dilation_y;
if(imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]);
else
*cur_col++ = 0;
}
for(int kx = 1; kx < kernel_x; kx += 2)
{
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (kx + k) * dilation_x;
if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
}
odd_line = false;
}
// even line 2n
else
{
for(int i = 0; i < 4; i++)
imy[i] = imy_start[i] + ky * dilation_y;
for(int kx = 0; kx < (kernel_x - 1); kx += 2)
{
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (kx + k) * dilation_x;
if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
}
kchp = kch;
kyp = ky;
odd_line = kernel_x1 ? true : false;
}
}
}
if(kernel_size1)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp * dilation_y;
imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x;
if(imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
int col_i = col_end & -4;
// final 4 input
if(col_end3)
{
int cnt_x[4] = {0};
int cnt_y[4] = {0};
int imx_start[4] = {0};
int imy_start[4] = {0};
for(int i = 0; i < 4; i++)
{
cnt_y[i] = (col_i + i) / output_x;
cnt_x[i] = col_i + i - cnt_y[i] * output_x;
imx_start[i] = cnt_x[i] * stride_x - pad_x0;
imy_start[i] = cnt_y[i] * stride_y - pad_y0;
}
bool odd_line = false;
kchp = 0;
kyp = 0;
for(int kch = 0; kch < input_chan; kch++)
{
for(int ky = 0; ky < kernel_y; ky++)
{
// odd line 1 + 2n
if(odd_line)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp * dilation_y;
imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x;
if((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
imy[i] = imy_start[i] + ky * dilation_y;
if((i < col_end3) && imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]);
else
*cur_col++ = 0;
}
for(int kx = 1; kx < kernel_x; kx += 2)
{
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (kx + k) * dilation_x;
if((i < col_end3) && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 &&
imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
}
odd_line = false;
}
// even line 2n + 1
else
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + ky * dilation_y;
}
for(int kx = 0; kx < (kernel_x - 1); kx += 2)
{
for(int i = 0; i < 4; i++)
{
for(int k = 0; k < 2; k++)
{
imx[i] = imx_start[i] + (kx + k) * dilation_x;
if(i < col_end3 && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 &&
imy[i] < input_y)
*cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
}
}
}
kchp = kch;
kyp = ky;
odd_line = kernel_x1 ? true : false;
}
}
}
if(kernel_size1)
{
for(int i = 0; i < 4; i++)
{
imy[i] = imy_start[i] + kyp * dilation_y;
imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x;
if((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
*cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]);
else
*cur_col++ = 0;
*cur_col++ = 0;
}
}
}
}
#endif
return;
}
int int8_conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor,
struct conv_priv_info* priv_info, struct conv_param* param)
{
int in_c = input_tensor->dims[1];
int in_h = input_tensor->dims[2];
int in_w = input_tensor->dims[3];
int out_c = output_tensor->dims[1];
int out_h = output_tensor->dims[2];
int out_w = output_tensor->dims[3];
/* alloc mem of im2col */
if (!priv_info->external_im2col_mem)
{
int mem_size = int8_conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->im2col_buffer = mem;
priv_info->im2col_buffer_size = mem_size;
}
/* alloc mem of kernel interleave */
if (!priv_info->external_interleave_mem)
{
int mem_size = get_private_mem_size(filter_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
/* kernel interleave */
interleave_int8(filter_tensor, priv_info, param);
priv_info->multi = (int*)sys_malloc(out_c * sizeof(int));
priv_info->q_shift = (int*)sys_malloc(out_c * sizeof(int));
float input_scale = input_tensor->scale;
float* kernel_scales = filter_tensor->scale_list;
float output_scale = output_tensor->scale;
priv_info->activation_min = -127;
priv_info->activation_max = 127;
/* set activation */
if(param->activation >= 0)
{
priv_info->activation_min = 0;
if(param->activation == 1)
priv_info->activation_max = round(1.0 / output_scale);
if(param->activation == 6)
priv_info->activation_max = round(6.0 / output_scale);
if(priv_info->activation_max > 127)
priv_info->activation_max = 127;
}
for(int i=0; i<out_c; i++)
{
float kernel_scale = kernel_scales[i];
float scale = input_scale * kernel_scale / output_scale;
int shift;
float q = frexp(scale, &shift);
int fix_q = round(q * (1ll << 31));
// TLOG_ERR("prerun: %f,%lld,%d,%d, %lld\n",q, fix_q, multi, q_shift, 1ll<<31);
if(fix_q == (1l << 31))
{
fix_q /= 2;
shift++;
}
priv_info->multi[i] = (int)fix_q;
priv_info->q_shift[i] = (int)shift;
}
return 0;
}
/* Release every buffer owned by the convolution private context and clear
 * the stale pointers so a repeated postrun is harmless.  Buffers supplied
 * externally (external_* flags set) are left for their owner to free.
 * Always returns 0. */
int int8_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    /* Work buffers: freed only when prerun allocated them internally. */
    if (priv_info->interleave_buffer != NULL && !priv_info->external_interleave_mem)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    if (priv_info->im2col_buffer != NULL && !priv_info->external_im2col_mem)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    /* Requantization tables: always owned by this context. */
    if (priv_info->multi != NULL)
    {
        sys_free(priv_info->multi);
        priv_info->multi = NULL;
    }

    if (priv_info->q_shift != NULL)
    {
        sys_free(priv_info->q_shift);
        priv_info->q_shift = NULL;
    }

    return 0;
}
/* Run one int8 GEMM-based convolution: for every batch image and group,
 * lower the input to a column matrix (im2col), multiply it against the
 * pre-interleaved kernel with the per-architecture i8gemm kernels, and
 * requantize with per-channel multipliers/shifts.  Columns are tiled so
 * each tile of the column matrix fits in the L2 cache.
 * Returns 0 (no failure paths).  filter_tensor is unused here; the
 * kernel data was interleaved into priv_info during prerun. */
int int8_conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
                      struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                      int num_thread, int cpu_affinity)
{
    /* param */
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_h1 = param->pad_h1;
    int pad_w0 = param->pad_w0;
    int pad_w1 = param->pad_w1;
    int act_type = param->activation;

    /* geometry: dims[] is indexed batch/channel/height/width; channel
     * counts are divided by group to get the per-group sizes */
    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;
    int input_image_size = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3];
    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;
    int out_c_align = ((out_c + 3) & -4); /* out_c rounded up to a multiple of 4 */
    int output_image_size = output_tensor->dims[1] * output_tensor->dims[2] * output_tensor->dims[3];
    int activation_min = priv_info->activation_min;
    int activation_max = priv_info->activation_max;

    /* buffer addr */
    int8_t* input_buf = ( int8_t* )input_tensor->data;
    int8_t* output_buf = ( int8_t* )output_tensor->data;
    int32_t* biases_buf = NULL;
    bool have_biases = false;
    if (bias_tensor != NULL)
    {
        biases_buf = (int32_t*)bias_tensor->data;
        have_biases = true;
    }
    int8_t* col_buf = ( int8_t* )priv_info->im2col_buffer;
    int8_t* interleave_buf = ( int8_t* )priv_info->interleave_buffer;

    /* block size split parameter: choose a column-tile count so roughly
     * 3/4 of the L2 cache holds one tile; little cores are assumed to
     * have a 512 KB L2, big cores 1 MB */
    int L2_CACHE_SIZE = (cpu_affinity == TENGINE_CLUSTER_LITTLE)? 512 * 1024 : 1024 * 1024;
    int kernel_size_l1 = kernel_size;
#ifdef __aarch64__
    int col_cnt_l2 = L2_CACHE_SIZE * 3 / kernel_size_l1 / 4;
#else
    int col_cnt_l2 = L2_CACHE_SIZE / 4 / kernel_size_l1 * 3 / 4;
#endif
    /* at least 4 columns per tile, rounded down to a multiple of 4 */
    col_cnt_l2 = col_cnt_l2 > 4 ? (col_cnt_l2 & -4) : 4;

    for (int n = 0; n < batch; n++) // batch size
    {
        int8_t* input = input_buf + n * input_size * group;
        int8_t* output = output_buf + n * output_size * group;
        for (int g = 0; g < group; g++)
        {
            int8_t* cur_input = input + g * input_size;
            /* expand this group's input window into the column buffer */
            im2col_int8(cur_input, col_buf, in_c, in_w, in_h, kernel_w, kernel_h, stride_w, stride_h, dilation_w, dilation_h,
                        pad_w0, pad_w1, pad_h0, pad_h1, out_w, out_h, num_thread);
            /* the interleaved kernel was padded to these alignments when
             * it was packed in prerun */
            int kernel_size_aligned2 = (kernel_size + 1) & -2;
            int output_chan_aligned4 = (out_c + 3) & -4;
            int8_t* kernel_g = interleave_buf + g * kernel_size_aligned2 * output_chan_aligned4;
            int8_t* output_g = output + g * output_size;
            int* bias_g = have_biases ? (biases_buf + g * out_c) : NULL;
            /* per-output-channel requantization multiplier and shift */
            int* multi_g = priv_info->multi + g * out_c;
            int* q_shift_g = priv_info->q_shift + g * out_c;

            // for input block of L2 cache size
            for(int col_i = 0; col_i < out_hw; col_i += col_cnt_l2)
            {
                int col_start = col_i;
                int col_end = col_i + col_cnt_l2;
                col_end = col_end > out_hw ? out_hw : col_end;
#ifdef __aarch64__
                /* bulk of the channels with the 4x16 kernel, remainder
                 * (out_c not a multiple of 16) with the 4x4 kernel */
                i8gemm4x16(col_buf, kernel_g, have_biases, bias_g, output_g, multi_g, kernel_size, out_hw,
                           col_start, col_end, 0, out_c & -16, activation_min, activation_max, q_shift_g, num_thread, cpu_affinity);
                if(out_c & 0xf)
                    i8gemm4x4(col_buf, kernel_g, have_biases, bias_g, output_g, multi_g, kernel_size, out_hw,
                              col_start, col_end, out_c & -16, out_c, activation_min, activation_max, q_shift_g, num_thread, cpu_affinity);
#else
                /* bulk of the channels with the 4x8 kernel, remainder
                 * (out_c not a multiple of 8) with the 4x4 kernel */
                i8gemm4x8(col_buf, kernel_g, have_biases, bias_g, output_g, multi_g, kernel_size, out_hw,
                          col_start, col_end, 0, out_c & -8, activation_min, activation_max, q_shift_g, num_thread, cpu_affinity);
                if(out_c & 0x7)
                    i8gemm4x4(col_buf, kernel_g, have_biases, bias_g, output_g, multi_g, kernel_size, out_hw,
                              col_start, col_end, out_c & -8, out_c, activation_min, activation_max, q_shift_g, num_thread, cpu_affinity);
#endif
            } // col_cont
        }
    }
    return 0;
}
|
GB_unjumbled_template.c | //------------------------------------------------------------------------------
// GB_unjumble_template: unjumble the vectors of a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
    // Parallel over tasks: each task unjumbles a contiguous range of
    // vectors; dynamic scheduling is used because the per-vector sort
    // cost varies widely.
    int tid ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // get the task description
        //----------------------------------------------------------------------

        // task tid owns vectors [kfirst, klast) per the A_slice partition
        int64_t kfirst = A_slice [tid] ;
        int64_t klast = A_slice [tid+1] ;

        //----------------------------------------------------------------------
        // sort vectors kfirst to klast
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k < klast ; k++)
        {

            //------------------------------------------------------------------
            // check if the vector needs sorting
            //------------------------------------------------------------------

            // scan the row indices of vector k; the vector is already
            // sorted iff the indices are non-decreasing, so bail out of
            // the scan at the first inversion
            bool jumbled = false ;
            int64_t pA_start = Ap [k] ;
            int64_t pA_end = Ap [k+1] ;
            int64_t ilast = -1 ;
            for (int64_t pA = pA_start ; pA < pA_end ; pA++)
            {
                int64_t i = Ai [pA] ;
                if (i < ilast)
                {
                    jumbled = true ;
                    break ;
                }
                ilast = i ;
            }

            //------------------------------------------------------------------
            // sort the vector
            //------------------------------------------------------------------

            if (jumbled)
            {
                // aknz is presumably read by the GB_QSORT macro supplied
                // by the including file -- confirm at each use site
                int64_t aknz = pA_end - pA_start ;
                GB_QSORT ;
            }
        }
    }
}

#undef GB_QSORT
|
simd-clones-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-optimized -O3" } */
/* Test that functions that have SIMD clone counterparts are not
cloned by IPA-cp. For example, special_add() below has SIMD clones
created for it. However, if IPA-cp later decides to clone a
specialization of special_add(x, 666) when analyzing fillit(), we
will forever keep the vectorizer from using the SIMD versions of
special_add in a loop.
If IPA-CP gets taught how to adjust the SIMD clones as well, this
test could be removed. */
/* Scalar helper with SIMD clones (simdlen 4).  Kept out-of-line
   (noinline) so calls must go through the clone interface; adds a
   123 bonus when the second operand is the magic value 666. */
#pragma omp declare simd simdlen(4)
static int __attribute__ ((noinline))
special_add (int x, int y)
{
  int result = x + y;
  if (y == 666)
    result += 123;
  return result;
}
/* Fill tot[0..9999] with special_add(idx, 666); the constant second
   argument is the IPA-CP specialization bait described above. */
void fillit(int *tot)
{
  int *slot = tot;
  int idx;

  for (idx = 0; idx < 10000; ++idx)
    *slot++ = special_add (idx, 666);
}
/* { dg-final { scan-tree-dump-not "special_add.constprop" "optimized" } } */
|
stream.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means the each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 80000000
#endif
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 100
#endif
#endif
#ifndef NTIMES
# define NTIMES 100
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
/* Read the x86-64 time-stamp counter with RDTSCP (the partially
 * serializing variant of RDTSC).  The asm shifts EDX into the high 32
 * bits and ORs in EAX to assemble the full 64-bit count, then moves it
 * into u.  x86-64 only; not usable on other architectures. */
static inline unsigned long long cycles()
{
    unsigned long long u;
    //asm volatile ("rdtsc;shlq $32,%%rdx;orq %%rdx,%%rax":"=a"(u)::"%rdx");
    asm volatile ("rdtscp;shlq $32,%%rdx;orq %%rdx,%%rax;movq %%rax,%0":"=q"(u)::"%rax", "%rdx", "rcx");
    return u;
}
/* The three STREAM work arrays; OFFSET may shift their relative
 * alignment (see the instructions above). */
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];

/* Per-kernel timing statistics, indexed 0..3 = Copy/Scale/Add/Triad;
 * mintime starts at FLT_MAX so the first measured time always wins. */
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

/* Report labels, in the same kernel order as the timing arrays. */
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};

/* Bytes moved per kernel iteration: Copy/Scale touch 2 arrays,
 * Add/Triad touch 3. */
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};

/* Timer and validation helpers defined later in this file. */
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
/* Optional user-supplied tuned kernels (stubs at end of file). */
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
/* Benchmark driver: print the configuration, initialize the arrays in
 * parallel, calibrate the timer, run the Copy/Scale/Add/Triad kernels
 * NTIMES times each, report the best-rate bandwidth per kernel, and
 * validate the results.  Returns 0. */
int
main()
{
    int quantum, checktick();
    int BytesPerWord;
    int k;
    ssize_t j;
    STREAM_TYPE scalar;
    double t, times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */
    printf(HLINE);
    printf("STREAM version $Revision: 5.10 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n",
           BytesPerWord);
    printf(HLINE);
#ifdef N
    printf("***** WARNING: ******\n");
    printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
    printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("***** WARNING: ******\n");
#endif

    printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
           BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
           BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
           (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
           (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
#pragma omp parallel
    {
#pragma omp master
        {
            k = omp_get_num_threads();
            printf ("Number of Threads requested = %i\n",k);
        }
    }
#endif

#ifdef _OPENMP
    /* Count the threads actually delivered (may differ from requested). */
    k = 0;
#pragma omp parallel
#pragma omp atomic
    k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock. */
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
        a[j] = 1.0;
        b[j] = 2.0;
        c[j] = 0.0;
    }

    printf(HLINE);

    /* Timer calibration: estimate the clock granularity and the
     * expected per-test duration so the user can size the arrays. */
    if ( (quantum = checktick()) >= 1)
        printf("Your clock granularity/precision appears to be "
               "%d microseconds.\n", quantum);
    else {
        printf("Your clock granularity appears to be "
               "less than one microsecond.\n");
        quantum = 1;
    }

    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
        a[j] = 2.0E0 * a[j];
    t = 1.0E6 * (mysecond() - t);

    printf("Each test below will take on the order"
           " of %d microseconds.\n", (int) t );
    printf(" (= %d clock ticks)\n", (int) (t/quantum) );
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");

    printf(HLINE);

    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);

    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */
    /* BUGFIX: c0 and c1 are both unsigned long long (cycles() returns
     * unsigned long long) and the difference is printed with the
     * matching "%llu" specifier; the previous mixed long long / "%ld"
     * pairing was undefined behavior in printf. */
    unsigned long long c0 = 0;
    unsigned long long c1 = 0;
    c0 = cycles();
    sleep(1);
    c1 = cycles();
    printf("1 second sleep, number of cycles = %llu\n", c1 - c0);

    scalar = 3.0;
    c0 = cycles();
    //printf("c0 = %llu\n", c0);
    for (k=0; k<NTIMES; k++)
    {
        /* Copy: c = a */
        times[0][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Copy();
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j];
#endif
        times[0][k] = mysecond() - times[0][k];

        /* Scale: b = scalar * c */
        times[1][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            b[j] = scalar*c[j];
#endif
        times[1][k] = mysecond() - times[1][k];

        /* Add: c = a + b */
        times[2][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Add();
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j]+b[j];
#endif
        times[2][k] = mysecond() - times[2][k];

        /* Triad: a = b + scalar * c */
        times[3][k] = mysecond();
#ifdef TUNED
        __asm__ volatile("# Triad begins");
        tuned_STREAM_Triad(scalar);
        __asm__ volatile("# Triad ends");
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            a[j] = b[j]+scalar*c[j];
#endif
        times[3][k] = mysecond() - times[3][k];
    }
    c1 = cycles();
    //printf("c1 = %llu\n", c1);

    /* --- SUMMARY --- */
    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
    {
        for (j=0; j<4; j++)
        {
            avgtime[j] = avgtime[j] + times[j][k];
            mintime[j] = MIN(mintime[j], times[j][k]);
            maxtime[j] = MAX(maxtime[j], times[j][k]);
        }
    }

    printf("Function Best Rate MB/s Avg time Min time Max time\n");
    for (j=0; j<4; j++) {
        avgtime[j] = avgtime[j]/(double)(NTIMES-1);

        printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
               1.0E-06 * bytes[j]/mintime[j],
               avgtime[j],
               mintime[j],
               maxtime[j]);
    }
    printf(HLINE);
    //printf("cycles : %lf\n", (double) (c1 - c0)/NTIMES/STREAM_ARRAY_SIZE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    return 0;
}
# define M 20
/* Estimate the system timer granularity in microseconds: collect M
 * strictly increasing timer readings, then return the smallest
 * non-negative gap between consecutive readings. */
int
checktick()
{
    double probe[M];
    int idx;

    /* Gather M distinct wall-clock samples: spin until the clock has
     * advanced by at least one microsecond before recording. */
    for (idx = 0; idx < M; idx++) {
        double start = mysecond();
        double now;
        do {
            now = mysecond();
        } while ((now - start) < 1.0E-6);
        probe[idx] = now;
    }

    /* The granularity estimate is the minimum positive delta between
     * consecutive samples (negative deltas are clamped to zero). */
    int best = 1000000;
    for (idx = 1; idx < M; idx++) {
        int delta = (int)( 1.0E6 * (probe[idx] - probe[idx-1]));
        if (delta < 0)
            delta = 0;
        if (delta < best)
            best = delta;
    }

    return best;
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
/* Wall-clock time in seconds with microsecond resolution, based on
 * gettimeofday().  Used for all kernel timings in this file.
 *
 * Cleanup: the return value used to be stored in an unused local int;
 * it is now explicitly discarded (gettimeofday on a valid pointer does
 * not fail in practice), and the obsolete timezone argument is passed
 * as NULL per POSIX instead of a dummy struct timezone. */
double mysecond()
{
    struct timeval tp;

    (void) gettimeofday(&tp, NULL);
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
/* Validate a[], b[], c[] against an analytic replay of the benchmark.
 * The scalars aj/bj/cj reproduce exactly what every array element
 * should hold after initialization, the 2x scaling done in main()'s
 * timing calibration, and NTIMES rounds of Copy/Scale/Add/Triad with
 * scalar = 3.0.  The per-array average absolute error must stay below
 * a precision-dependent epsilon; failures are counted and reported. */
void checkSTREAMresults ()
{
    STREAM_TYPE aj,bj,cj,scalar;
    STREAM_TYPE aSumErr,bSumErr,cSumErr;
    STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
    double epsilon;
    ssize_t j;
    int k,ierr,err;

    /* reproduce initialization */
    aj = 1.0;
    bj = 2.0;
    cj = 0.0;
    /* a[] is modified during timing check */
    aj = 2.0E0 * aj;
    /* now execute timing loop */
    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
    {
        cj = aj;
        bj = scalar*cj;
        cj = aj+bj;
        aj = bj+scalar*cj;
    }

    /* accumulate deltas between observed and expected results */
    /* note: abs here is the file-local macro, so it works on doubles */
    aSumErr = 0.0;
    bSumErr = 0.0;
    cSumErr = 0.0;
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
        aSumErr += abs(a[j] - aj);
        bSumErr += abs(b[j] - bj);
        cSumErr += abs(c[j] - cj);
        // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
    }
    aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

    /* tolerance scales with the precision of STREAM_TYPE */
    if (sizeof(STREAM_TYPE) == 4) {
        epsilon = 1.e-6;
    }
    else if (sizeof(STREAM_TYPE) == 8) {
        epsilon = 1.e-13;
    }
    else {
        printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
        epsilon = 1.e-6;
    }

    err = 0;
    /* For each array: flag it when the average relative error exceeds
     * epsilon, then count (and, under VERBOSE, print the first few)
     * offending elements. */
    if (abs(aAvgErr/aj) > epsilon) {
        err++;
        printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(a[j]/aj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                           j,aj,a[j],abs((aj-a[j])/aAvgErr));
                }
#endif
            }
        }
        printf(" For array a[], %d errors were found.\n",ierr);
    }
    if (abs(bAvgErr/bj) > epsilon) {
        err++;
        printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(b[j]/bj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                           j,bj,b[j],abs((bj-b[j])/bAvgErr));
                }
#endif
            }
        }
        printf(" For array b[], %d errors were found.\n",ierr);
    }
    if (abs(cAvgErr/cj) > epsilon) {
        err++;
        printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(c[j]/cj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                           j,cj,c[j],abs((cj-c[j])/cAvgErr));
                }
#endif
            }
        }
        printf(" For array c[], %d errors were found.\n",ierr);
    }
    if (err == 0) {
        printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
    }
#ifdef VERBOSE
    printf ("Results Validation Verbose Results: \n");
    printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
    printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
    printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
/* Tuned-kernel stub: straight copy, c = a (replace with tuned code). */
void tuned_STREAM_Copy()
{
    ssize_t idx;
#pragma omp parallel for
    for (idx = 0; idx < STREAM_ARRAY_SIZE; idx++) {
        c[idx] = a[idx];
    }
}
/* Tuned-kernel stub: scale, b = scalar * c (replace with tuned code). */
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
    ssize_t idx;
#pragma omp parallel for
    for (idx = 0; idx < STREAM_ARRAY_SIZE; idx++) {
        b[idx] = scalar * c[idx];
    }
}
/* Tuned-kernel stub: vector sum, c = a + b (replace with tuned code). */
void tuned_STREAM_Add()
{
    ssize_t idx;
#pragma omp parallel for
    for (idx = 0; idx < STREAM_ARRAY_SIZE; idx++) {
        c[idx] = a[idx] + b[idx];
    }
}
/* Tuned-kernel stub: triad, a = b + scalar * c (replace with tuned code).
 *
 * BUGFIX: the definition was marked plain `inline`.  In C99/C11 an
 * `inline` definition that is neither `static` nor `extern` does not
 * provide an external definition, so the `extern void
 * tuned_STREAM_Triad()` declaration used by main() could fail to link
 * at higher optimization levels.  Dropping `inline` makes this an
 * ordinary external definition, matching the other three stubs. */
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
    ssize_t j;
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
HSetMaintainer.h | #ifndef HSET_MAINTAINER_H
#define HSET_MAINTAINER_H
/*************************************************************
* Copyright: (C) 2012 by Markus Schordan *
* Author : Markus Schordan *
* License : see file LICENSE in the CodeThorn distribution *
*************************************************************/
#include "HSet.h"
using namespace br_stl;
//#include "/usr/include/valgrind/memcheck.h"
//#define HSET_MAINTAINER_DEBUG_MODE
/*!
* \author Markus Schordan
* \date 2012.
*/
/*! Maintains a canonical hash set of KeyType values: each distinct
 * value is stored exactly once and identified by a stable pointer into
 * the set.  process() is the concurrency-safe entry point (serialized
 * with an OpenMP critical section); the other accessors are not
 * internally synchronized.
 * \author Markus Schordan
 * \date 2012.
 */
template<typename KeyType,typename HashFun>
class HSetMaintainer : public HSet<KeyType,HashFun> {
public:
  //! <flag, pointer>: flag is true iff the element was newly inserted.
  typedef pair<bool,const KeyType*> ProcessingResult;

  //! True iff an element equal to s is already maintained.
  bool exists(KeyType& s) {
    return determine(s)!=0;
  }

  //! Linear-scan position of s in iteration order; throws if s is
  //! not in the set.  O(n) because the iterator has no operator '-'.
  size_t id(const KeyType& s) {
    typename HSet<KeyType,HashFun>::const_iterator i=HSetMaintainer<KeyType,HashFun>::find(s);
    if(i!=HSetMaintainer<KeyType,HashFun>::end()) {
      // in lack of operator '-' we compute the distance
      size_t pos=0;
      typename HSet<KeyType,HashFun>::const_iterator b=HSetMaintainer<KeyType,HashFun>::begin();
      while(b!=i) {
        pos++;
        ++b;
      }
      return pos;
    }
    else
      throw "Error: unknown value. Maintainer cannot determine an id.";
  }

  // NOTE(review): public member with no visible use in this header;
  // kept for backward compatibility with any code that references it.
  typename HSetMaintainer<KeyType,HashFun>::iterator i;

  //! Pointer to the stored element equal to s, or 0 if not present.
  KeyType* determine(KeyType& s) {
    typename HSetMaintainer<KeyType,HashFun>::iterator i;
    i=HSetMaintainer<KeyType,HashFun>::find(s);
    if(i!=HSetMaintainer<KeyType,HashFun>::end()) {
      return const_cast<KeyType*>(&(*i));
    } else {
      return 0;
    }
  }

  //! Const overload of determine().
  const KeyType* determine(const KeyType& s) {
    typename HSetMaintainer<KeyType,HashFun>::iterator i;
    i=HSetMaintainer<KeyType,HashFun>::find(s);
    if(i!=HSetMaintainer<KeyType,HashFun>::end()) {
      return &(*i);
    } else {
      return 0;
    }
  }

  //! <true,const KeyType> if new element was inserted
  //! <false,const KeyType> if element already existed
  ProcessingResult process(KeyType key) {
    ProcessingResult res2;
    // serialize insertion: multiple threads may canonicalize concurrently
#pragma omp critical
    {
      std::pair<typename HSetMaintainer::iterator, bool> res;
      // BUGFIX(portability): `insert` lives in the dependent base class
      // HSet<KeyType,HashFun>; standard two-phase lookup requires
      // `this->` to find it (the unqualified call only compiled in
      // permissive modes).
      res=this->insert(key);
#ifdef HSET_MAINTAINER_DEBUG_MODE
      // re-inserting the same key twice must be a no-op and must report
      // the same iterator/flag pair both times
      std::pair<typename HSetMaintainer::iterator, bool> res1;
      res1=this->insert(key);
      std::pair<typename HSetMaintainer::iterator, bool> res2;
      res2=this->insert(key);
      if(!(res1==res2)) {
        cerr<< "Error: HsetMaintainer failed:"<<endl;
        cerr<< "res1:"<<(*res1.first).toString()<<":"<<res1.second<<endl;
        cerr<< "res2:"<<(*res2.first).toString()<<":"<<res2.second<<endl;
        exit(1);
      }
      cerr << "HSET insert OK"<<endl;
#endif
      res2=make_pair(res.second,&(*res.first));
    }
    return res2;
  }

  //! Like process(), but aborts the program unless the element was
  //! newly inserted.
  const KeyType* processNew(KeyType& s) {
    //std::pair<typename HSetMaintainer::iterator, bool> res=process(s);
    ProcessingResult res=process(s);
    if(res.first!=true) {
      cerr<< "Error: HsetMaintainer::processNew failed:"<<endl;
      cerr<< "res:";
      cout <<":"<<res.first<<endl;
      cout <<res.second->toString();
      exit(1);
    }
    return res.second;
  }

  //! Insert-or-lookup; returns the canonical pointer either way.
  const KeyType* processNewOrExisting(KeyType& s) {
    ProcessingResult res=process(s);
    return res.second;
  }

  //! Number of maintained elements.
  long numberOf() { return HSetMaintainer<KeyType,HashFun>::size(); }

  //! Longest collision chain in the underlying hash set.
  long maxCollisions() {
    return HSetMaintainer<KeyType,HashFun>::max_collisions();
  }

  //! Load factor of the underlying hash set.
  double loadFactor() {
    return HSetMaintainer<KeyType,HashFun>::load_factor();
  }

  //! Sum of memorySize() over all elements plus this object.
  long memorySize() const {
    long mem=0;
    for(typename HSetMaintainer<KeyType,HashFun>::const_iterator i
          =HSetMaintainer<KeyType,HashFun>::begin();
        i!=HSetMaintainer<KeyType,HashFun>::end();
        ++i) {
      mem+=(*i).memorySize();
    }
    return mem+sizeof(*this);
  }

private:
  //! BUGFIX: this private helper previously had an empty body, so any
  //! call would have been undefined behavior (missing return value).
  //! It now delegates to determine(), preserving the lookup semantics.
  const KeyType* ptr(KeyType& s) { return determine(s); }
};
#endif
|
Compute.h | #ifndef COMPUTE_H_INCLUDED
#define COMPUTE_H_INCLUDED
#include <stdio.h>
#include <stdlib.h>
#include <SDL2/SDL.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <immintrin.h>
#include "Grad.h"
/* Branch-free per-lane approximation in the style of the classic
 * "magic constant" trick: halve the IEEE-754 exponent bits with a
 * 64-bit right shift and re-bias with an additive constant.
 * NOTE(review): despite the name this is only a rough sqrt
 * approximation (no Newton refinement step), and the scalar+__m256i
 * addition relies on GCC/Clang vector-extension arithmetic -- confirm
 * the precision is acceptable for its callers (coloring only). */
inline static __m256d fsqrt(__m256d val)
{
    return _mm256_castsi256_pd(0x1ff7770000000000+_mm256_srli_epi64(_mm256_castpd_si256(val),1));
}
/* Companion bit-trick to fsqrt(): shifts the exponent bits right by 2
 * and re-biases, yielding roughly a fourth-root-like compression of
 * val used for smooth coloring.
 * NOTE(review): the magic constant 0x2ff2ff0000000000 appears
 * hand-tuned for the iteration-count range used below -- confirm
 * before reusing elsewhere; relies on GCC/Clang vector-extension
 * scalar+__m256i addition. */
inline static __m256d normalize(__m256d val)
{
    return _mm256_castsi256_pd(0x2ff2ff0000000000+_mm256_srli_epi64(_mm256_castpd_si256(val),2));
}
inline static void Screenshot(double m,double ph, double pv,int iter, int res,char mode,char mop,double mcx,double mcy)
{
char file[30];
int height=HEIGHT*res, width=WIDTH*res;
int off,i,j,off1,l,iters=iter*0.85,off2,off3;
unsigned char *pixels = malloc(height*4*width),tcb,tcg,tcr;
double prex=(width*(-0.5)+1.0*m*ph*res),prey=(height*(-0.5)-1.0*m*pv*res);
__m256d zy,cx,cy,x,y,four,mask,inv= _mm256_set1_pd(1.0/(360.0*m*res)),sum;
__m256d k,iterace,one,avg,avg1,smooth,smooth1;
__m256d zy1,cx1,cy1,x1,y1,mask1,sum1,k1;
__m256d mask3=_mm256_set1_pd(-0.);
__m256d iter20=_mm256_set1_pd(iter/100.0);
__m256d zero=_mm256_set1_pd(0.0);
iterace=_mm256_set1_pd(iter);
one=_mm256_set1_pd(1);
four= _mm256_set1_pd(100.0);
if(mode)
{
if(mop)
{
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zy,x,y,sum,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
for(i=4; i<height-4; i+=4)
{
for(j=4; j<width-4; j+=4)
{
off=4*(width*i+j);
off1=4*(width*(i+1)+j);
off2=4*(width*(i+2)+j);
off3=4*(width*(i+3)+j);
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(y,x,cy);
y1=_mm256_fmadd_pd(y1,x1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(y,x,cy);
y1=_mm256_fmadd_pd(y1,x1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zy,x,y,cy,cx,sum,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
for(i=4; i<height-4; i+=4)
{
for(j=4; j<width-4; j+=4)
{
off=4*(width*i+j);
off1=4*(width*(i+1)+j);
off2=4*(width*(i+2)+j);
off3=4*(width*(i+3)+j);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(y,x,cy);
y1=_mm256_fmadd_pd(y1,x1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(y,x,cy);
y1=_mm256_fmadd_pd(y1,x1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if(!mode)
{
if(mop)
{
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zy,x,y,sum,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
for(i=4; i<height-4; i+=4)
{
for(j=4; j<width-4; j+=4)
{
off=4*(width*i+j);
off1=4*(width*(i+1)+j);
off2=4*(width*(i+2)+j);
off3=4*(width*(i+3)+j);
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
x=_mm256_andnot_pd(mask3,x);
x1=_mm256_andnot_pd(mask3,x1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(x,y,cy);
y1=_mm256_fmadd_pd(x1,y1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
x=_mm256_andnot_pd(mask3,x);
x1=_mm256_andnot_pd(mask3,x1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(x,y,cy);
y1=_mm256_fmadd_pd(x1,y1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zy,x,y,cy,cx,sum,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
for(i=4; i<height-4; i+=4)
{
for(j=4; j<width-4; j+=4)
{
off=4*(width*i+j);
off1=4*(width*(i+1)+j);
off2=4*(width*(i+2)+j);
off3=4*(width*(i+3)+j);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
x=_mm256_andnot_pd(mask3,x);
x1=_mm256_andnot_pd(mask3,x1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(x,y,cy);
y1=_mm256_fmadd_pd(x1,y1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
x=_mm256_andnot_pd(mask3,x);
x1=_mm256_andnot_pd(mask3,x1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(x,y,cy);
y1=_mm256_fmadd_pd(x1,y1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
SDL_Surface *surf = SDL_CreateRGBSurfaceFrom(pixels, width, height, 8*4, width*4, 0, 0, 0, 0);
sprintf_s(file,30,"images/%d.bmp",time(NULL));
SDL_SaveBMP(surf,file);
SDL_FreeSurface(surf);
free(pixels);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
inline static void Render(unsigned char *pixels,double m,double ph, double pv,int iter, char mode,char mop,double mcx,
double mcy,char index,char index2,char index3)
{
double prex=(m*ph-HWIDTH),prey=(-HHEIGHT-m*pv);
__m256d zy,cx,cy,x,y,four,mask,inv,sum,avg,avg1,smooth,smooth1;
__m256d zy1,cx1,cy1,x1,y1,mask1,sum1,k1;
__m256d k,iterace,one;
__m256d zero=_mm256_set1_pd(0.0);
__m256d mask3=_mm256_set1_pd(-0.);
__m256d iter20=_mm256_set1_pd(iter/100.0);
iterace=_mm256_set1_pd(iter);
one=_mm256_set1_pd(1.0);
four= _mm256_set1_pd(100.0);
inv= _mm256_set1_pd(1.0/(360.0*m));
int off,i,j,off1,l,iters=iter*0.85,off2,off3;
unsigned char tcb,tcg,tcr;
if(mode)
{
if(mop)
{
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zy,x,y,sum,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4; j<WIDTH-4; j+=4+index2*4)
{
if(j<4)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(y,x,cy);
y1=_mm256_fmadd_pd(y1,x1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(y,x,cy);
y1=_mm256_fmadd_pd(y1,x1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zy,x,y,cy,cx,sum,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4; j<WIDTH-4; j+=4+index2*4)
{
if(j<4)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(y,x,cy);
y1=_mm256_fmadd_pd(y1,x1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(y,x,cy);
y1=_mm256_fmadd_pd(y1,x1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if(!mode)
{
if(mop)
{
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zy,x,y,sum,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4; j<WIDTH-4; j+=4+index2*4)
{
if(j<4)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
x=_mm256_andnot_pd(mask3,x);
x1=_mm256_andnot_pd(mask3,x1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(x,y,cy);
y1=_mm256_fmadd_pd(x1,y1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
x=_mm256_andnot_pd(mask3,x);
x1=_mm256_andnot_pd(mask3,x1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(x,y,cy);
y1=_mm256_fmadd_pd(x1,y1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zy,x,y,cy,cx,sum,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4; j<WIDTH-4; j+=4+index2*4)
{
if(j<4)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
x=_mm256_andnot_pd(mask3,x);
x1=_mm256_andnot_pd(mask3,x1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(x,y,cy);
y1=_mm256_fmadd_pd(x1,y1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zy=y*y;
zy1=y1*y1;
sum=_mm256_fmadd_pd(x,x,zy);
sum1=_mm256_fmadd_pd(x1,x1,zy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
x=_mm256_andnot_pd(mask3,x);
x1=_mm256_andnot_pd(mask3,x1);
y+=y;
y1+=y1;
y=_mm256_fmadd_pd(x,y,cy);
y1=_mm256_fmadd_pd(x1,y1,cy1);
x=_mm256_fmsub_pd(x,x,zy)+cx;
x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k+=_mm256_and_pd(one,mask);
k1+=_mm256_and_pd(one,mask1);
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
}
/*
 * RenderCol -- renders fractal pixels into the BGRA byte framebuffer `pixels`
 * using 4-wide AVX2/FMA double-precision arithmetic (two __m256d lanes per
 * tile => 8 pixels per inner-loop pass), parallelised with OpenMP.
 *
 *   pixels  - framebuffer, 4 bytes per pixel, WIDTH*HEIGHT pixels
 *   m       - zoom factor; inv = 1/(360*m) maps pixel units -> plane units
 *   ph, pv  - pan offsets (combined with m into prex/prey)
 *   iter    - maximum iteration count per pixel
 *   mode    - non-zero: plain quadratic iteration z = z^2 + c
 *             zero:     absolute-value variant (|Re z|,|Im z| each step)
 *   mop     - non-zero: Julia-style (constant c = mcx + i*mcy)
 *             zero:     Mandelbrot-style (c derived from the pixel position)
 *   mcx,mcy - Julia constant (used only when mop != 0)
 *   index, index2, index3 - progressive-rendering interleave parameters that
 *             select which rows/columns this call fills
 *
 * Per 4x4 tile the code first computes a sparse 8-pixel "checkerboard"; if
 * all 8 resolve to one colour it flood-fills the remaining 8 pixels,
 * otherwise it computes the complementary checkerboard exactly.
 *
 * Review fixes applied (behaviour-affecting bugs in the original):
 *  - off1/off2/off3/l were missing from every private() clause, so every
 *    thread raced on them (wrong pixel addresses, torn iteration counts).
 *  - smooth/smooth1 (private per thread) were read by _mm256_blendv_pd
 *    before any write -> uninitialized read; they are now zeroed in the
 *    per-tile reset alongside avg/k.
 */
inline static void RenderCol(unsigned char *pixels,double m,double ph, double pv,int iter, char mode,char mop,double mcx,
double mcy,char index,char index2,char index3)
{
  double prex=(m*ph-HWIDTH),prey=(-HHEIGHT-m*pv);
  __m256d zy,cx,cy,x,y,four,mask,inv,sum,avg,avg1,smooth,smooth1;
  __m256d zy1,cx1,cy1,x1,y1,mask1,sum1,k1;
  __m256d k,iterace,one;
  __m256d zero=_mm256_set1_pd(0.0);
  __m256d mask3=_mm256_set1_pd(-0.);      /* sign-bit mask: andnot(mask3,v) == fabs(v) */
  __m256d iter20=_mm256_set1_pd(iter/100.0);
  iterace=_mm256_set1_pd(iter);
  one=_mm256_set1_pd(1.0);
  four= _mm256_set1_pd(100.0);            /* NOTE: bailout is |z|^2 >= 100, not 4; name kept for compatibility */
  inv= _mm256_set1_pd(1.0/(360.0*m));
  int off,i,j,off1,l,iters=iter*0.85,off2,off3;
  unsigned char tcb,tcg,tcr;
  if(mode)
  {
    if(mop)
    {
      /* Julia set: the same constant c for every pixel, set before the region. */
      cx1=cx=_mm256_set1_pd(mcx);
      cy1=cy=_mm256_set1_pd(mcy);
      /* BUGFIX: off1,off2,off3,l added to private() (were shared -> data race). */
      #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,off3,l,i,j,k,zy,x,y,sum,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
      for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
      {
        for(j=(index*index2+index3)*4+2*WIDTH/5-4; j<WIDTH-4; j+=4+index2*4)
        {
          if(j<2*WIDTH/5)continue;        /* this pass renders only the right 3/5 of the frame */
          off = 4*WIDTH*i+(j<<2);
          off1 = 4*WIDTH*(i+1)+(j<<2);
          off2 = 4*WIDTH*(i+2)+(j<<2);
          off3 = 4*WIDTH*(i+3)+(j<<2);
          /* First checkerboard: rows i/i+1 interleaved in one vector, i+2/i+3 in the other. */
          x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
          x1=x=_mm256_mul_pd(x,inv);
          y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
          y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
          y=_mm256_mul_pd(y,inv);
          y1=_mm256_mul_pd(y1,inv);
          /* BUGFIX: smooth/smooth1 zeroed here (were read uninitialized below). */
          smooth=smooth1=avg=avg1=k=k1=_mm256_setzero_pd();
          l=0;
          do
          {
            /* z = z^2 + c, expanded: sum = |z|^2 for the bailout test. */
            zy=y*y;
            zy1=y1*y1;
            sum=_mm256_fmadd_pd(x,x,zy);
            sum1=_mm256_fmadd_pd(x1,x1,zy1);
            y+=y;
            y1+=y1;
            y=_mm256_fmadd_pd(y,x,cy);
            y1=_mm256_fmadd_pd(y1,x1,cy1);
            x=_mm256_fmsub_pd(x,x,zy)+cx;
            x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
            mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
            mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
            /* Per-lane escape-time counter: +1 while still inside the bailout radius. */
            k+=_mm256_and_pd(one,mask);
            k1+=_mm256_and_pd(one,mask1);
            if(l>iters)
            {
              /* Late-iteration orbit maximum, used to colour non-escaping lanes. */
              avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
            }
            if(l<500)
            {
              /* Last in-range |z|^2 per lane, used for smooth colouring. */
              smooth=_mm256_blendv_pd(smooth,sum,mask);
              smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
            }
          }
          while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
          /* mask now flags lanes that never escaped (k == iter). */
          mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
          mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
          k-=0.69*normalize(smooth);
          k1-=0.69*normalize(smooth1);
          k=_mm256_div_pd(k,iterace);
          k1=_mm256_div_pd(k1,iterace);
          k=_mm256_blendv_pd(k,0.25*avg,mask);
          k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
          k=_mm256_min_pd(k,one);
          k1=_mm256_min_pd(k1,one);
          k=_mm256_max_pd(k,zero);k*=8000.0;
          k1=_mm256_max_pd(k1,zero);k1*=8000.0;
          /* Store the first checkerboard (B,G,R per pixel; A untouched). */
          tcb=pixels[off] = colb(k[0]);
          tcg=pixels[off+1] = colg(k[0]);
          tcr=pixels[off+2] = colr(k[0]);
          pixels[off1+4] = colb(k[1]);
          pixels[off1+5] = colg(k[1]);
          pixels[off1+6] = colr(k[1]);
          pixels[off+8] = colb(k[2]);
          pixels[off+9] = colg(k[2]);
          pixels[off+10] = colr(k[2]);
          pixels[off1+12] = colb(k[3]);
          pixels[off1+13] = colg(k[3]);
          pixels[off1+14] = colr(k[3]);
          pixels[off2] = colb(k1[0]);
          pixels[off2+1] = colg(k1[0]);
          pixels[off2+2] = colr(k1[0]);
          pixels[off3+4] = colb(k1[1]);
          pixels[off3+5] = colg(k1[1]);
          pixels[off3+6] = colr(k1[1]);
          pixels[off2+8] = colb(k1[2]);
          pixels[off2+9] = colg(k1[2]);
          pixels[off2+10] = colr(k1[2]);
          pixels[off3+12] = colb(k1[3]);
          pixels[off3+13] = colg(k1[3]);
          pixels[off3+14] = colr(k1[3]);
          /* If all 8 sampled pixels share one colour, flood-fill the tile... */
          if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
          tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
          tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
          {
            pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
            pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
            pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
          }
          else
          {
            /* ...otherwise compute the complementary checkerboard exactly (rows swapped). */
            x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
            x1=x=_mm256_mul_pd(x,inv);
            y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
            y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
            y=_mm256_mul_pd(y,inv);
            y1=_mm256_mul_pd(y1,inv);
            smooth=smooth1=avg=avg1=k=k1=_mm256_setzero_pd();
            l=0;
            do
            {
              zy=y*y;
              zy1=y1*y1;
              sum=_mm256_fmadd_pd(x,x,zy);
              sum1=_mm256_fmadd_pd(x1,x1,zy1);
              y+=y;
              y1+=y1;
              y=_mm256_fmadd_pd(y,x,cy);
              y1=_mm256_fmadd_pd(y1,x1,cy1);
              x=_mm256_fmsub_pd(x,x,zy)+cx;
              x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
              mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
              mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
              k+=_mm256_and_pd(one,mask);
              k1+=_mm256_and_pd(one,mask1);
              if(l>iters)
              {
                avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
              }
              if(l<500)
              {
                smooth=_mm256_blendv_pd(smooth,sum,mask);
                smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
              }
            }
            while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
            mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
            mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
            k-=0.69*normalize(smooth);
            k1-=0.69*normalize(smooth1);
            k=_mm256_div_pd(k,iterace);
            k1=_mm256_div_pd(k1,iterace);
            k=_mm256_blendv_pd(k,0.25*avg,mask);
            k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
            k=_mm256_min_pd(k,one);
            k1=_mm256_min_pd(k1,one);
            k=_mm256_max_pd(k,zero);k*=8000.0;
            k1=_mm256_max_pd(k1,zero);k1*=8000.0;
            pixels[off1] = colb(k[0]);
            pixels[off1+1] = colg(k[0]);
            pixels[off1+2] = colr(k[0]);
            pixels[off+4] = colb(k[1]);
            pixels[off+5] = colg(k[1]);
            pixels[off+6] = colr(k[1]);
            pixels[off1+8] = colb(k[2]);
            pixels[off1+9] = colg(k[2]);
            pixels[off1+10] = colr(k[2]);
            pixels[off+12] = colb(k[3]);
            pixels[off+13] = colg(k[3]);
            pixels[off+14] = colr(k[3]);
            pixels[off3] = colb(k1[0]);
            pixels[off3+1] = colg(k1[0]);
            pixels[off3+2] = colr(k1[0]);
            pixels[off2+4] = colb(k1[1]);
            pixels[off2+5] = colg(k1[1]);
            pixels[off2+6] = colr(k1[1]);
            pixels[off3+8] = colb(k1[2]);
            pixels[off3+9] = colg(k1[2]);
            pixels[off3+10] = colr(k1[2]);
            pixels[off2+12] = colb(k1[3]);
            pixels[off2+13] = colg(k1[3]);
            pixels[off2+14] = colr(k1[3]);
          }
        }
      }
    }
    if(!mop)
    {
      /* Mandelbrot-style: c is derived per pixel, so cx/cy/cx1/cy1 are private too. */
      #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,off3,l,i,j,k,zy,x,y,cy,cx,sum,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
      for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
      {
        for(j=(index*index2+index3)*4+2*WIDTH/5-4; j<WIDTH-4; j+=4+index2*4)
        {
          if(j<2*WIDTH/5)continue;
          off = 4*WIDTH*i+(j<<2);
          off1 = 4*WIDTH*(i+1)+(j<<2);
          off2 = 4*WIDTH*(i+2)+(j<<2);
          off3 = 4*WIDTH*(i+3)+(j<<2);
          cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
          x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
          cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
          cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
          y=cy=_mm256_mul_pd(cy,inv);
          y1=cy1=_mm256_mul_pd(cy1,inv);
          smooth=smooth1=avg=avg1=k=k1=_mm256_setzero_pd();
          l=0;
          do
          {
            zy=y*y;
            zy1=y1*y1;
            sum=_mm256_fmadd_pd(x,x,zy);
            sum1=_mm256_fmadd_pd(x1,x1,zy1);
            y+=y;
            y1+=y1;
            y=_mm256_fmadd_pd(y,x,cy);
            y1=_mm256_fmadd_pd(y1,x1,cy1);
            x=_mm256_fmsub_pd(x,x,zy)+cx;
            x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
            mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
            mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
            k+=_mm256_and_pd(one,mask);
            k1+=_mm256_and_pd(one,mask1);
            if(l>iters)
            {
              avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
            }
            if(l<500)
            {
              smooth=_mm256_blendv_pd(smooth,sum,mask);
              smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
            }
          }
          while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
          mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
          mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
          k-=0.69*normalize(smooth);
          k1-=0.69*normalize(smooth1);
          k=_mm256_div_pd(k,iterace);
          k1=_mm256_div_pd(k1,iterace);
          k=_mm256_blendv_pd(k,0.25*avg,mask);
          k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
          k=_mm256_min_pd(k,one);
          k1=_mm256_min_pd(k1,one);
          k=_mm256_max_pd(k,zero);k*=8000.0;
          k1=_mm256_max_pd(k1,zero);k1*=8000.0;
          tcb=pixels[off] = colb(k[0]);
          tcg=pixels[off+1] = colg(k[0]);
          tcr=pixels[off+2] = colr(k[0]);
          pixels[off1+4] = colb(k[1]);
          pixels[off1+5] = colg(k[1]);
          pixels[off1+6] = colr(k[1]);
          pixels[off+8] = colb(k[2]);
          pixels[off+9] = colg(k[2]);
          pixels[off+10] = colr(k[2]);
          pixels[off1+12] = colb(k[3]);
          pixels[off1+13] = colg(k[3]);
          pixels[off1+14] = colr(k[3]);
          pixels[off2] = colb(k1[0]);
          pixels[off2+1] = colg(k1[0]);
          pixels[off2+2] = colr(k1[0]);
          pixels[off3+4] = colb(k1[1]);
          pixels[off3+5] = colg(k1[1]);
          pixels[off3+6] = colr(k1[1]);
          pixels[off2+8] = colb(k1[2]);
          pixels[off2+9] = colg(k1[2]);
          pixels[off2+10] = colr(k1[2]);
          pixels[off3+12] = colb(k1[3]);
          pixels[off3+13] = colg(k1[3]);
          pixels[off3+14] = colr(k1[3]);
          if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
          tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
          tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
          {
            pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
            pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
            pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
          }
          else
          {
            x1=cx1=x=cx;
            cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
            cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
            y=cy=_mm256_mul_pd(cy,inv);
            y1=cy1=_mm256_mul_pd(cy1,inv);
            smooth=smooth1=avg=avg1=k=k1=_mm256_setzero_pd();
            l=0;
            do
            {
              zy=y*y;
              zy1=y1*y1;
              sum=_mm256_fmadd_pd(x,x,zy);
              sum1=_mm256_fmadd_pd(x1,x1,zy1);
              y+=y;
              y1+=y1;
              y=_mm256_fmadd_pd(y,x,cy);
              y1=_mm256_fmadd_pd(y1,x1,cy1);
              x=_mm256_fmsub_pd(x,x,zy)+cx;
              x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
              mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
              mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
              k+=_mm256_and_pd(one,mask);
              k1+=_mm256_and_pd(one,mask1);
              if(l>iters)
              {
                avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
              }
              if(l<500)
              {
                smooth=_mm256_blendv_pd(smooth,sum,mask);
                smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
              }
            }
            while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
            mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
            mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
            k-=0.69*normalize(smooth);
            k1-=0.69*normalize(smooth1);
            k=_mm256_div_pd(k,iterace);
            k1=_mm256_div_pd(k1,iterace);
            k=_mm256_blendv_pd(k,0.25*avg,mask);
            k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
            k=_mm256_min_pd(k,one);
            k1=_mm256_min_pd(k1,one);
            k=_mm256_max_pd(k,zero);k*=8000.0;
            k1=_mm256_max_pd(k1,zero);k1*=8000.0;
            pixels[off1] = colb(k[0]);
            pixels[off1+1] = colg(k[0]);
            pixels[off1+2] = colr(k[0]);
            pixels[off+4] = colb(k[1]);
            pixels[off+5] = colg(k[1]);
            pixels[off+6] = colr(k[1]);
            pixels[off1+8] = colb(k[2]);
            pixels[off1+9] = colg(k[2]);
            pixels[off1+10] = colr(k[2]);
            pixels[off+12] = colb(k[3]);
            pixels[off+13] = colg(k[3]);
            pixels[off+14] = colr(k[3]);
            pixels[off3] = colb(k1[0]);
            pixels[off3+1] = colg(k1[0]);
            pixels[off3+2] = colr(k1[0]);
            pixels[off2+4] = colb(k1[1]);
            pixels[off2+5] = colg(k1[1]);
            pixels[off2+6] = colr(k1[1]);
            pixels[off3+8] = colb(k1[2]);
            pixels[off3+9] = colg(k1[2]);
            pixels[off3+10] = colr(k1[2]);
            pixels[off2+12] = colb(k1[3]);
            pixels[off2+13] = colg(k1[3]);
            pixels[off2+14] = colr(k1[3]);
          }
        }
      }
    }
  }
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  if(!mode)
  {
    /* Absolute-value variant: x and y are replaced by |x|,|y| (via andnot with
     * the sign-bit mask) before the quadratic step.  Colour post-processing
     * also differs: non-escaping lanes use avg*iter20 instead of 0.25*avg,
     * and the blend happens before the division by iter. */
    if(mop)
    {
      cx1=cx=_mm256_set1_pd(mcx);
      cy1=cy=_mm256_set1_pd(mcy);
      #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,off3,l,i,j,k,zy,x,y,sum,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
      for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
      {
        for(j=(index*index2+index3)*4+2*WIDTH/5-4; j<WIDTH-4; j+=4+index2*4)
        {
          if(j<2*WIDTH/5)continue;
          off = 4*WIDTH*i+(j<<2);
          off1 = 4*WIDTH*(i+1)+(j<<2);
          off2 = 4*WIDTH*(i+2)+(j<<2);
          off3 = 4*WIDTH*(i+3)+(j<<2);
          x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
          x1=x=_mm256_mul_pd(x,inv);
          y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
          y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
          y=_mm256_mul_pd(y,inv);
          y1=_mm256_mul_pd(y1,inv);
          smooth=smooth1=avg=avg1=k=k1=_mm256_setzero_pd();
          l=0;
          do
          {
            zy=y*y;
            zy1=y1*y1;
            sum=_mm256_fmadd_pd(x,x,zy);
            sum1=_mm256_fmadd_pd(x1,x1,zy1);
            /* |x|,|y| before the quadratic step. */
            y=_mm256_andnot_pd(mask3,y);
            y1=_mm256_andnot_pd(mask3,y1);
            x=_mm256_andnot_pd(mask3,x);
            x1=_mm256_andnot_pd(mask3,x1);
            y+=y;
            y1+=y1;
            y=_mm256_fmadd_pd(x,y,cy);
            y1=_mm256_fmadd_pd(x1,y1,cy1);
            x=_mm256_fmsub_pd(x,x,zy)+cx;
            x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
            mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
            mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
            k+=_mm256_and_pd(one,mask);
            k1+=_mm256_and_pd(one,mask1);
            if(l<50)
            {
              /* Early-orbit sum, used to colour non-escaping lanes. */
              avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
            }
            if(l<500)
            {
              smooth=_mm256_blendv_pd(smooth,sum,mask);
              smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
            }
          }
          while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
          mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
          mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
          k-=0.69*normalize(smooth);
          k1-=0.69*normalize(smooth1);
          k=_mm256_blendv_pd(k,avg*iter20,mask);
          k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
          k=_mm256_div_pd(k,iterace);
          k1=_mm256_div_pd(k1,iterace);
          k=_mm256_min_pd(k,one);
          k1=_mm256_min_pd(k1,one);
          k=_mm256_max_pd(k,zero);k*=8000.0;
          k1=_mm256_max_pd(k1,zero);k1*=8000.0;
          tcb=pixels[off] = colb(k[0]);
          tcg=pixels[off+1] = colg(k[0]);
          tcr=pixels[off+2] = colr(k[0]);
          pixels[off1+4] = colb(k[1]);
          pixels[off1+5] = colg(k[1]);
          pixels[off1+6] = colr(k[1]);
          pixels[off+8] = colb(k[2]);
          pixels[off+9] = colg(k[2]);
          pixels[off+10] = colr(k[2]);
          pixels[off1+12] = colb(k[3]);
          pixels[off1+13] = colg(k[3]);
          pixels[off1+14] = colr(k[3]);
          pixels[off2] = colb(k1[0]);
          pixels[off2+1] = colg(k1[0]);
          pixels[off2+2] = colr(k1[0]);
          pixels[off3+4] = colb(k1[1]);
          pixels[off3+5] = colg(k1[1]);
          pixels[off3+6] = colr(k1[1]);
          pixels[off2+8] = colb(k1[2]);
          pixels[off2+9] = colg(k1[2]);
          pixels[off2+10] = colr(k1[2]);
          pixels[off3+12] = colb(k1[3]);
          pixels[off3+13] = colg(k1[3]);
          pixels[off3+14] = colr(k1[3]);
          if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
          tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
          tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
          {
            pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
            pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
            pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
          }
          else
          {
            x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
            x1=x=_mm256_mul_pd(x,inv);
            y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
            y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
            y=_mm256_mul_pd(y,inv);
            y1=_mm256_mul_pd(y1,inv);
            smooth=smooth1=avg=avg1=k=k1=_mm256_setzero_pd();
            l=0;
            do
            {
              zy=y*y;
              zy1=y1*y1;
              sum=_mm256_fmadd_pd(x,x,zy);
              sum1=_mm256_fmadd_pd(x1,x1,zy1);
              y=_mm256_andnot_pd(mask3,y);
              y1=_mm256_andnot_pd(mask3,y1);
              x=_mm256_andnot_pd(mask3,x);
              x1=_mm256_andnot_pd(mask3,x1);
              y+=y;
              y1+=y1;
              y=_mm256_fmadd_pd(x,y,cy);
              y1=_mm256_fmadd_pd(x1,y1,cy1);
              x=_mm256_fmsub_pd(x,x,zy)+cx;
              x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
              mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
              mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
              k+=_mm256_and_pd(one,mask);
              k1+=_mm256_and_pd(one,mask1);
              if(l<50)
              {
                avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
              }
              if(l<500)
              {
                smooth=_mm256_blendv_pd(smooth,sum,mask);
                smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
              }
            }
            while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
            mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
            mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
            k-=0.69*normalize(smooth);
            k1-=0.69*normalize(smooth1);
            k=_mm256_blendv_pd(k,avg*iter20,mask);
            k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
            k=_mm256_div_pd(k,iterace);
            k1=_mm256_div_pd(k1,iterace);
            k=_mm256_min_pd(k,one);
            k1=_mm256_min_pd(k1,one);
            k=_mm256_max_pd(k,zero);k*=8000.0;
            k1=_mm256_max_pd(k1,zero);k1*=8000.0;
            pixels[off1] = colb(k[0]);
            pixels[off1+1] = colg(k[0]);
            pixels[off1+2] = colr(k[0]);
            pixels[off+4] = colb(k[1]);
            pixels[off+5] = colg(k[1]);
            pixels[off+6] = colr(k[1]);
            pixels[off1+8] = colb(k[2]);
            pixels[off1+9] = colg(k[2]);
            pixels[off1+10] = colr(k[2]);
            pixels[off+12] = colb(k[3]);
            pixels[off+13] = colg(k[3]);
            pixels[off+14] = colr(k[3]);
            pixels[off3] = colb(k1[0]);
            pixels[off3+1] = colg(k1[0]);
            pixels[off3+2] = colr(k1[0]);
            pixels[off2+4] = colb(k1[1]);
            pixels[off2+5] = colg(k1[1]);
            pixels[off2+6] = colr(k1[1]);
            pixels[off3+8] = colb(k1[2]);
            pixels[off3+9] = colg(k1[2]);
            pixels[off3+10] = colr(k1[2]);
            pixels[off2+12] = colb(k1[3]);
            pixels[off2+13] = colg(k1[3]);
            pixels[off2+14] = colr(k1[3]);
          }
        }
      }
    }
    if(!mop)
    {
      #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,off3,l,i,j,k,zy,x,y,cy,cx,sum,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,tcb,tcg,tcr)
      for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
      {
        for(j=(index*index2+index3)*4+2*WIDTH/5-4; j<WIDTH-4; j+=4+index2*4)
        {
          if(j<2*WIDTH/5)continue;
          off = 4*WIDTH*i+(j<<2);
          off1 = 4*WIDTH*(i+1)+(j<<2);
          off2 = 4*WIDTH*(i+2)+(j<<2);
          off3 = 4*WIDTH*(i+3)+(j<<2);
          cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
          x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
          cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
          cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
          y=cy=_mm256_mul_pd(cy,inv);
          y1=cy1=_mm256_mul_pd(cy1,inv);
          smooth=smooth1=avg=avg1=k=k1=_mm256_setzero_pd();
          l=0;
          do
          {
            zy=y*y;
            zy1=y1*y1;
            sum=_mm256_fmadd_pd(x,x,zy);
            sum1=_mm256_fmadd_pd(x1,x1,zy1);
            y=_mm256_andnot_pd(mask3,y);
            y1=_mm256_andnot_pd(mask3,y1);
            x=_mm256_andnot_pd(mask3,x);
            x1=_mm256_andnot_pd(mask3,x1);
            y+=y;
            y1+=y1;
            y=_mm256_fmadd_pd(x,y,cy);
            y1=_mm256_fmadd_pd(x1,y1,cy1);
            x=_mm256_fmsub_pd(x,x,zy)+cx;
            x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
            mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
            mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
            k+=_mm256_and_pd(one,mask);
            k1+=_mm256_and_pd(one,mask1);
            if(l<50)
            {
              avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
            }
            if(l<500)
            {
              smooth=_mm256_blendv_pd(smooth,sum,mask);
              smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
            }
          }
          while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
          mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
          mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
          k-=0.69*normalize(smooth);
          k1-=0.69*normalize(smooth1);
          k=_mm256_blendv_pd(k,avg*iter20,mask);
          k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
          k=_mm256_div_pd(k,iterace);
          k1=_mm256_div_pd(k1,iterace);
          k=_mm256_min_pd(k,one);
          k1=_mm256_min_pd(k1,one);
          k=_mm256_max_pd(k,zero);k*=8000.0;
          k1=_mm256_max_pd(k1,zero);k1*=8000.0;
          tcb=pixels[off] = colb(k[0]);
          tcg=pixels[off+1] = colg(k[0]);
          tcr=pixels[off+2] = colr(k[0]);
          pixels[off1+4] = colb(k[1]);
          pixels[off1+5] = colg(k[1]);
          pixels[off1+6] = colr(k[1]);
          pixels[off+8] = colb(k[2]);
          pixels[off+9] = colg(k[2]);
          pixels[off+10] = colr(k[2]);
          pixels[off1+12] = colb(k[3]);
          pixels[off1+13] = colg(k[3]);
          pixels[off1+14] = colr(k[3]);
          pixels[off2] = colb(k1[0]);
          pixels[off2+1] = colg(k1[0]);
          pixels[off2+2] = colr(k1[0]);
          pixels[off3+4] = colb(k1[1]);
          pixels[off3+5] = colg(k1[1]);
          pixels[off3+6] = colr(k1[1]);
          pixels[off2+8] = colb(k1[2]);
          pixels[off2+9] = colg(k1[2]);
          pixels[off2+10] = colr(k1[2]);
          pixels[off3+12] = colb(k1[3]);
          pixels[off3+13] = colg(k1[3]);
          pixels[off3+14] = colr(k1[3]);
          if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
          tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
          tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
          {
            pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
            pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
            pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
          }
          else
          {
            x1=cx1=x=cx;
            cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
            cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
            y=cy=_mm256_mul_pd(cy,inv);
            y1=cy1=_mm256_mul_pd(cy1,inv);
            smooth=smooth1=avg=avg1=k=k1=_mm256_setzero_pd();
            l=0;
            do
            {
              zy=y*y;
              zy1=y1*y1;
              sum=_mm256_fmadd_pd(x,x,zy);
              sum1=_mm256_fmadd_pd(x1,x1,zy1);
              y=_mm256_andnot_pd(mask3,y);
              y1=_mm256_andnot_pd(mask3,y1);
              x=_mm256_andnot_pd(mask3,x);
              x1=_mm256_andnot_pd(mask3,x1);
              y+=y;
              y1+=y1;
              y=_mm256_fmadd_pd(x,y,cy);
              y1=_mm256_fmadd_pd(x1,y1,cy1);
              x=_mm256_fmsub_pd(x,x,zy)+cx;
              x1=_mm256_fmsub_pd(x1,x1,zy1)+cx1;
              mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
              mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
              k+=_mm256_and_pd(one,mask);
              k1+=_mm256_and_pd(one,mask1);
              if(l<50)
              {
                avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
              }
              if(l<500)
              {
                smooth=_mm256_blendv_pd(smooth,sum,mask);
                smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
              }
            }
            while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
            mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
            mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
            k-=0.69*normalize(smooth);
            k1-=0.69*normalize(smooth1);
            k=_mm256_blendv_pd(k,avg*iter20,mask);
            k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
            k=_mm256_div_pd(k,iterace);
            k1=_mm256_div_pd(k1,iterace);
            k=_mm256_min_pd(k,one);
            k1=_mm256_min_pd(k1,one);
            k=_mm256_max_pd(k,zero);k*=8000.0;
            k1=_mm256_max_pd(k1,zero);k1*=8000.0;
            pixels[off1] = colb(k[0]);
            pixels[off1+1] = colg(k[0]);
            pixels[off1+2] = colr(k[0]);
            pixels[off+4] = colb(k[1]);
            pixels[off+5] = colg(k[1]);
            pixels[off+6] = colr(k[1]);
            pixels[off1+8] = colb(k[2]);
            pixels[off1+9] = colg(k[2]);
            pixels[off1+10] = colr(k[2]);
            pixels[off+12] = colb(k[3]);
            pixels[off+13] = colg(k[3]);
            pixels[off+14] = colr(k[3]);
            pixels[off3] = colb(k1[0]);
            pixels[off3+1] = colg(k1[0]);
            pixels[off3+2] = colr(k1[0]);
            pixels[off2+4] = colb(k1[1]);
            pixels[off2+5] = colg(k1[1]);
            pixels[off2+6] = colr(k1[1]);
            pixels[off3+8] = colb(k1[2]);
            pixels[off3+9] = colg(k1[2]);
            pixels[off3+10] = colr(k1[2]);
            pixels[off2+12] = colb(k1[3]);
            pixels[off2+13] = colg(k1[3]);
            pixels[off2+14] = colr(k1[3]);
          }
        }
      }
    }
  }
}
#endif // COMPUTE_H_INCLUDED
|
relu-field.h | /*
Authors: Mayank Rathee, Deevashwer Rathee
Copyright:
Copyright (c) 2020 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef RELU_FIELD_H__
#define RELU_FIELD_H__
#include "Millionaire/millionaire.h"
#include "NonLinear/drelu-field.h"
#include "NonLinear/relu-interface.h"
#define FIELD 1
template <typename type> class ReLUFieldProtocol : public ReLUProtocol<type> {
public:
sci::IOPack *iopack;
sci::OTPack *otpack;
TripleGenerator *triple_gen;
DReLUFieldProtocol *relu_triple_compare_oracle;
int party;
int algeb_str;
int l, b;
int num_cmps;
uint64_t p;
uint64_t p_bitlen;
uint64_t p_bitlen_triple_comparison;
uint64_t rhs_wrap;
uint64_t rhs_wrap_off;
uint64_t rhs_wrap_on;
uint8_t zero_small = 0;
// Constructor
ReLUFieldProtocol(int party, int algeb_str, sci::IOPack *iopack, int l, int b,
uint64_t mod, sci::OTPack *otpack) {
this->party = party;
this->algeb_str = algeb_str;
this->iopack = iopack;
this->l = l;
this->b = b;
this->p = mod;
this->p_bitlen = l;
this->p_bitlen_triple_comparison = l + 1;
this->otpack = otpack;
this->triple_gen = new TripleGenerator(party, iopack, otpack);
this->relu_triple_compare_oracle =
new DReLUFieldProtocol(party, l + 1, b, mod, iopack, otpack);
configure();
}
// Destructor
~ReLUFieldProtocol() {
delete triple_gen;
delete relu_triple_compare_oracle;
}
void configure() {
// Set all the inequality RHS values here.
rhs_wrap = this->p;
rhs_wrap_off = this->p / 2;
rhs_wrap_on = this->p + this->p / 2;
}
// Tester Ideal Functionality
void drelu_field_ideal_func(uint8_t *result, uint64_t *sh1, uint64_t *sh2,
int num_relu) {
uint8_t *wrap, *relu_wrap_off, *relu_wrap_on, *expected_drelu,
*actual_drelu;
wrap = new uint8_t[num_relu];
relu_wrap_off = new uint8_t[num_relu];
relu_wrap_on = new uint8_t[num_relu];
expected_drelu = new uint8_t[num_relu];
actual_drelu = new uint8_t[num_relu];
for (int i = 0; i < num_relu; i++) {
wrap[i] = greater_than_wrap(
sh1[i], sci::neg_mod((int64_t)(this->rhs_wrap - sh2[i]), this->p));
relu_wrap_off[i] = greater_than_relu_wrap_off(
sh1[i],
sci::neg_mod((int64_t)(this->rhs_wrap_off - sh2[i]), this->p));
relu_wrap_on[i] = greater_than_relu_wrap_on(
sh1[i], sci::neg_mod((int64_t)(this->rhs_wrap_on - sh2[i]), this->p));
expected_drelu[i] =
(relu_wrap_on[i] & wrap[i]) ^ ((1 ^ wrap[i]) & relu_wrap_off[i]);
expected_drelu[i] = 1 ^ expected_drelu[i];
actual_drelu[i] = (((sh1[i] + sh2[i]) % this->p) > this->rhs_wrap_off);
result[i] = 1 ^ actual_drelu[i];
assert(((result[i] & 1) == (1 & expected_drelu[i])) &&
"The computed DReLU did not match the actual DReLU");
}
}
// Wrap comparison with the RHS reduced modulo p. A reduced RHS of zero
// means the original RHS was an exact multiple of p, so the comparison
// is never satisfied.
uint8_t greater_than_wrap(uint64_t lhs, uint64_t rhs) {
  return (rhs == 0ULL) ? (uint8_t)0 : (uint8_t)(lhs >= rhs);
}
// Comparison used on the no-wrap path: once the reduced RHS exceeds p/2
// the result is trivially 1; otherwise compare strictly against lhs.
uint8_t greater_than_relu_wrap_off(uint64_t lhs, uint64_t rhs) {
  if (rhs > this->rhs_wrap_off)
    return 1;
  return (uint8_t)(lhs > rhs);
}
// Comparison used on the wrap path: a reduced RHS at or below p/2 forces
// the result to 0; otherwise compare strictly against lhs.
uint8_t greater_than_relu_wrap_on(uint64_t lhs, uint64_t rhs) {
  if (rhs > this->rhs_wrap_off)
    return (uint8_t)(lhs > rhs);
  return 0;
}
// DReLU on additive shares: delegates to the (l+1)-bit comparison oracle
// constructed in the constructor.
void drelu(uint8_t *drelu_res, uint64_t *share, int num_relu) {
  auto *oracle = this->relu_triple_compare_oracle;
  oracle->compute_drelu(drelu_res, share, num_relu);
}
// ReLU on additive shares: computes DReLU, then multiplies x by its DReLU
// bit via two IKNP OTs with reversed party roles (one per direction).
//   result     - receives fresh additive shares of ReLU(x) mod p
//   share      - this party's additive shares of x
//   drelu_res  - optional: receives a copy of the boolean DReLU shares
//   skip_ot    - stop after DReLU; the caller performs the selection OTs
//                itself (e.g. batched with Max/ArgMax)
void relu(type *result, type *share, int num_relu,
          uint8_t *drelu_res = nullptr, bool skip_ot = false) {
  uint8_t *drelu_ans = new uint8_t[num_relu];
  drelu(drelu_ans, (uint64_t *)share, num_relu);
  if (drelu_res != nullptr) {
    memcpy(drelu_res, drelu_ans, num_relu * sizeof(uint8_t));
  }
  if (skip_ot) {
    delete[] drelu_ans;
    // std::cout<<"Doing Max and ArgMax OTs together"<<std::endl;
    return;
  }
  // Now perform x.msb(x)
  // 2 OTs required with reversed roles
  uint64_t **ot_messages = new uint64_t *[num_relu];
  for (int i = 0; i < num_relu; i++) {
    ot_messages[i] = new uint64_t[2];
  }
  uint64_t *additive_masks = new uint64_t[num_relu];
  uint64_t *received_shares = new uint64_t[num_relu];
  this->triple_gen->prg->template random_mod_p<type>((type *)additive_masks,
                                                     num_relu, this->p);
  for (int i = 0; i < num_relu; i++) {
    additive_masks[i] %= this->p;
  }
  for (int i = 0; i < num_relu; i++) {
    set_relu_end_ot_messages(ot_messages[i], share + i, drelu_ans + i,
                             ((type *)additive_masks) + i);
  }
  // Run the two OT directions concurrently: each party sends in one
  // instance and receives in the other.
#pragma omp parallel num_threads(2)
  {
    if (omp_get_thread_num() == 1) {
      if (party == sci::ALICE) {
        otpack->iknp_reversed->recv(received_shares, drelu_ans, num_relu,
                                    this->l);
      } else { // party == sci::BOB
        otpack->iknp_reversed->send(ot_messages, num_relu, this->l);
      }
    } else {
      if (party == sci::ALICE) {
        otpack->iknp_straight->send(ot_messages, num_relu, this->l);
      } else { // party == sci::BOB
        otpack->iknp_straight->recv(received_shares, drelu_ans, num_relu,
                                    this->l);
      }
    }
  }
  for (int i = 0; i < num_relu; i++) {
    // received_shares is packed as uint64_t; the 8/sizeof(type) stride
    // picks out the low `type`-sized word of each received element.
    result[i] = ((type *)additive_masks)[i] +
                ((type *)received_shares)[(8 / sizeof(type)) * i];
    result[i] %= this->p;
  }
  delete[] drelu_ans; // fix: was previously leaked on the non-skip_ot path
  delete[] additive_masks;
  delete[] received_shares;
  for (int i = 0; i < num_relu; i++) {
    delete[] ot_messages[i];
  }
  delete[] ot_messages;
}
void set_relu_end_ot_messages(uint64_t *ot_messages, type *value_share,
uint8_t *xor_share, type *additive_mask) {
type temp0, temp1;
temp0 = sci::neg_mod((int64_t)value_share[0] - (int64_t)additive_mask[0],
this->p);
temp1 = sci::neg_mod((int64_t)0LL - (int64_t)additive_mask[0], this->p);
if (*xor_share == zero_small) {
ot_messages[0] = 0ULL + temp0;
ot_messages[1] = 0ULL + temp1;
} else {
ot_messages[0] = 0ULL + temp1;
ot_messages[1] = 0ULL + temp0;
}
}
};
#endif // RELU_FIELD_H__
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.