source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
omp_pi.c | /**
* Shared memory (OpenMP) parallel computation of pi.
*
* @author Akash Pallath
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define REFPI 3.1415926535897932384626433
/**
 * Approximate pi by midpoint-rule integration of 4/(1+x^2) over [0,1].
 *
 * The sum is accumulated in parallel with an OpenMP reduction; the code
 * also compiles and runs correctly serially (pragmas ignored).
 *
 * @param nsteps number of integration sub-intervals (must be > 0)
 * @return midpoint-rule approximation of pi (error is O(1/nsteps^2))
 */
long double pi(long long nsteps){
    long double step, sum = 0.0;
    step = 1.0 / ((long double) nsteps);
    #pragma omp parallel
    {
        long double x; /* midpoint abscissa, private to each thread */
        #pragma omp for reduction(+:sum)
        /* Index declared in the loop (C99): unambiguously private per the
           OpenMP data-sharing rules, instead of relying on implicit
           privatization of a function-scope variable. */
        for(long long i = 0; i < nsteps; i++){
            x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
        }
    }
    return step * sum;
}
int main(int argc, char* argv[]){
long long nsteps;
if(argc < 2){
printf("Required argument: number of steps to compute pi for\n");
exit(-1);
}
nsteps = atoll(argv[1]);
long double comp_pi = pi(nsteps);
long double error = fabs(comp_pi - REFPI);
printf("%lli steps; pi = %.25Lf; error = %.25Lf\n", nsteps, comp_pi, error);
}
|
kpoint.c | /* Copyright (C) 2008 Atsushi Togo */
/* All rights reserved. */
/* This file is part of spglib. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the spglib project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include "mathfunc.h"
#include "kpoint.h"
#include "kgrid.h"
#ifdef KPTWARNING
#include <stdio.h>
#define warning_print(...) fprintf(stderr,__VA_ARGS__)
#else
#define warning_print(...)
#endif
#define KPT_NUM_BZ_SEARCH_SPACE 125
/* Offsets, in units of whole reciprocal unit cells, of the 5x5x5 block */
/* of neighboring cells searched when folding a grid point into the     */
/* first Brillouin zone (used by relocate_dense_BZ_grid_address).       */
/* 125 = 5^3 entries; each component runs over {0, 1, 2, -2, -1}.       */
static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = {
  { 0, 0, 0},
  { 0, 0, 1},
  { 0, 0, 2},
  { 0, 0, -2},
  { 0, 0, -1},
  { 0, 1, 0},
  { 0, 1, 1},
  { 0, 1, 2},
  { 0, 1, -2},
  { 0, 1, -1},
  { 0, 2, 0},
  { 0, 2, 1},
  { 0, 2, 2},
  { 0, 2, -2},
  { 0, 2, -1},
  { 0, -2, 0},
  { 0, -2, 1},
  { 0, -2, 2},
  { 0, -2, -2},
  { 0, -2, -1},
  { 0, -1, 0},
  { 0, -1, 1},
  { 0, -1, 2},
  { 0, -1, -2},
  { 0, -1, -1},
  { 1, 0, 0},
  { 1, 0, 1},
  { 1, 0, 2},
  { 1, 0, -2},
  { 1, 0, -1},
  { 1, 1, 0},
  { 1, 1, 1},
  { 1, 1, 2},
  { 1, 1, -2},
  { 1, 1, -1},
  { 1, 2, 0},
  { 1, 2, 1},
  { 1, 2, 2},
  { 1, 2, -2},
  { 1, 2, -1},
  { 1, -2, 0},
  { 1, -2, 1},
  { 1, -2, 2},
  { 1, -2, -2},
  { 1, -2, -1},
  { 1, -1, 0},
  { 1, -1, 1},
  { 1, -1, 2},
  { 1, -1, -2},
  { 1, -1, -1},
  { 2, 0, 0},
  { 2, 0, 1},
  { 2, 0, 2},
  { 2, 0, -2},
  { 2, 0, -1},
  { 2, 1, 0},
  { 2, 1, 1},
  { 2, 1, 2},
  { 2, 1, -2},
  { 2, 1, -1},
  { 2, 2, 0},
  { 2, 2, 1},
  { 2, 2, 2},
  { 2, 2, -2},
  { 2, 2, -1},
  { 2, -2, 0},
  { 2, -2, 1},
  { 2, -2, 2},
  { 2, -2, -2},
  { 2, -2, -1},
  { 2, -1, 0},
  { 2, -1, 1},
  { 2, -1, 2},
  { 2, -1, -2},
  { 2, -1, -1},
  {-2, 0, 0},
  {-2, 0, 1},
  {-2, 0, 2},
  {-2, 0, -2},
  {-2, 0, -1},
  {-2, 1, 0},
  {-2, 1, 1},
  {-2, 1, 2},
  {-2, 1, -2},
  {-2, 1, -1},
  {-2, 2, 0},
  {-2, 2, 1},
  {-2, 2, 2},
  {-2, 2, -2},
  {-2, 2, -1},
  {-2, -2, 0},
  {-2, -2, 1},
  {-2, -2, 2},
  {-2, -2, -2},
  {-2, -2, -1},
  {-2, -1, 0},
  {-2, -1, 1},
  {-2, -1, 2},
  {-2, -1, -2},
  {-2, -1, -1},
  {-1, 0, 0},
  {-1, 0, 1},
  {-1, 0, 2},
  {-1, 0, -2},
  {-1, 0, -1},
  {-1, 1, 0},
  {-1, 1, 1},
  {-1, 1, 2},
  {-1, 1, -2},
  {-1, 1, -1},
  {-1, 2, 0},
  {-1, 2, 1},
  {-1, 2, 2},
  {-1, 2, -2},
  {-1, 2, -1},
  {-1, -2, 0},
  {-1, -2, 1},
  {-1, -2, 2},
  {-1, -2, -2},
  {-1, -2, -1},
  {-1, -1, 0},
  {-1, -1, 1},
  {-1, -1, 2},
  {-1, -1, -2},
  {-1, -1, -1}
};
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal);
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const size_t num_q,
SPGCONST double qpoints[][3]);
static size_t get_dense_ir_reciprocal_mesh(int grid_address[][3],
size_t ir_mapping_table[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal);
static size_t get_dense_ir_reciprocal_mesh_normal(int grid_address[][3],
size_t ir_mapping_table[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal);
static size_t get_dense_ir_reciprocal_mesh_distortion(int grid_address[][3],
size_t ir_mapping_table[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal);
static size_t get_dense_num_ir(size_t ir_mapping_table[], const int mesh[3]);
static size_t relocate_dense_BZ_grid_address(int bz_grid_address[][3],
size_t bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3]);
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
const int mesh[3]);
static int check_mesh_symmetry(const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal);
/* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */
/* [[ 0 0 0] */
/* [ 1 0 0] */
/* [ 2 0 0] */
/* [-1 0 0] */
/* [ 0 1 0] */
/* [ 1 1 0] */
/* [ 2 1 0] */
/* [-1 1 0] */
/* .... ] */
/* */
/* Each value of 'map' correspnds to the index of grid_point. */
/* Copy-based wrapper around kpt_get_dense_irreducible_reciprocal_mesh  */
/* exposing an int-typed ir_mapping_table for the legacy API.           */
/* Returns the number of irreducible points, or 0 on allocation failure. */
int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3],
                                        int ir_mapping_table[],
                                        const int mesh[3],
                                        const int is_shift[3],
                                        const MatINT *rot_reciprocal)
{
  int num_ir;
  size_t i, num_grid;
  size_t *dense_ir_mapping_table;

  /* Compute the grid size in size_t: the all-int product              */
  /* mesh[0]*mesh[1]*mesh[2] can overflow for dense meshes, and it was */
  /* also used as a signed bound against the unsigned loop index.      */
  num_grid = (size_t)mesh[0] * mesh[1] * mesh[2];

  if ((dense_ir_mapping_table =
       (size_t*)malloc(sizeof(size_t) * num_grid)) == NULL) {
    /* Message previously named "unique_rot" (copy-paste from another  */
    /* function); report the buffer actually being allocated.          */
    warning_print("spglib: Memory of dense_ir_mapping_table could not be allocated.");
    return 0;
  }

  num_ir = kpt_get_dense_irreducible_reciprocal_mesh(grid_address,
                                                     dense_ir_mapping_table,
                                                     mesh,
                                                     is_shift,
                                                     rot_reciprocal);
  for (i = 0; i < num_grid; i++) {
    ir_mapping_table[i] = dense_ir_mapping_table[i];
  }

  free(dense_ir_mapping_table);
  dense_ir_mapping_table = NULL;

  return num_ir;
}
/* Public entry point for the irreducible-mesh reduction with a size_t */
/* mapping table; forwards directly to the static worker.              */
size_t kpt_get_dense_irreducible_reciprocal_mesh(int grid_address[][3],
                                                 size_t ir_mapping_table[],
                                                 const int mesh[3],
                                                 const int is_shift[3],
                                                 const MatINT *rot_reciprocal)
{
  return get_dense_ir_reciprocal_mesh(grid_address,
                                      ir_mapping_table,
                                      mesh,
                                      is_shift,
                                      rot_reciprocal);
}
/* Copy-based wrapper around kpt_get_dense_stabilized_reciprocal_mesh  */
/* exposing an int-typed ir_mapping_table for the legacy API.          */
/* Returns the number of irreducible points, or 0 on allocation failure. */
int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3],
                                       int ir_mapping_table[],
                                       const int mesh[3],
                                       const int is_shift[3],
                                       const int is_time_reversal,
                                       const MatINT * rotations,
                                       const size_t num_q,
                                       SPGCONST double qpoints[][3])
{
  int num_ir;
  size_t i, num_grid;
  size_t *dense_ir_mapping_table;

  /* size_t arithmetic: the all-int product mesh[0]*mesh[1]*mesh[2]    */
  /* can overflow for dense meshes, and it was also used as a signed   */
  /* bound against the unsigned loop index.                            */
  num_grid = (size_t)mesh[0] * mesh[1] * mesh[2];

  if ((dense_ir_mapping_table =
       (size_t*)malloc(sizeof(size_t) * num_grid)) == NULL) {
    /* Message previously named "unique_rot" (copy-paste); report the  */
    /* buffer actually being allocated.                                */
    warning_print("spglib: Memory of dense_ir_mapping_table could not be allocated.");
    return 0;
  }

  num_ir = kpt_get_dense_stabilized_reciprocal_mesh(grid_address,
                                                    dense_ir_mapping_table,
                                                    mesh,
                                                    is_shift,
                                                    is_time_reversal,
                                                    rotations,
                                                    num_q,
                                                    qpoints);
  for (i = 0; i < num_grid; i++) {
    ir_mapping_table[i] = dense_ir_mapping_table[i];
  }

  free(dense_ir_mapping_table);
  dense_ir_mapping_table = NULL;

  return num_ir;
}
/* Reduce the mesh under the subgroup of the reciprocal point group    */
/* that stabilizes the given q-points.                                 */
/* Returns the number of irreducible points, or 0 on allocation failure. */
size_t kpt_get_dense_stabilized_reciprocal_mesh(int grid_address[][3],
                                                size_t ir_mapping_table[],
                                                const int mesh[3],
                                                const int is_shift[3],
                                                const int is_time_reversal,
                                                const MatINT * rotations,
                                                const size_t num_q,
                                                SPGCONST double qpoints[][3])
{
  size_t num_ir;
  MatINT *rot_reciprocal, *rot_reciprocal_q;
  double tolerance;

  rot_reciprocal = NULL;
  rot_reciprocal_q = NULL;

  /* get_point_group_reciprocal returns NULL on allocation failure;    */
  /* previously that NULL was passed on and dereferenced.              */
  if ((rot_reciprocal = get_point_group_reciprocal(rotations,
                                                   is_time_reversal)) == NULL) {
    return 0;
  }

  /* Tolerance for matching rotated q-points, scaled to the mesh.      */
  tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);

  if ((rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal,
                                                            tolerance,
                                                            num_q,
                                                            qpoints)) == NULL) {
    mat_free_MatINT(rot_reciprocal);
    rot_reciprocal = NULL;
    return 0;
  }

  num_ir = get_dense_ir_reciprocal_mesh(grid_address,
                                        ir_mapping_table,
                                        mesh,
                                        is_shift,
                                        rot_reciprocal_q);

  mat_free_MatINT(rot_reciprocal_q);
  rot_reciprocal_q = NULL;
  mat_free_MatINT(rot_reciprocal);
  rot_reciprocal = NULL;

  return num_ir;
}
/* For one grid point, compute the dense grid index of its image under */
/* each of the num_rot reciprocal-space rotations.                     */
void
kpt_get_dense_grid_points_by_rotations(size_t rot_grid_points[],
                                       const int address_orig[3],
                                       SPGCONST int (*rot_reciprocal)[3][3],
                                       const int num_rot,
                                       const int mesh[3],
                                       const int is_shift[3])
{
  int k, rot_index;
  int orig_double[3], rotated_double[3];

  /* Doubled-mesh convention: a half-grid shift becomes an integer     */
  /* offset, so rotations act on integer addresses.                    */
  for (k = 0; k < 3; k++) {
    orig_double[k] = 2 * address_orig[k] + is_shift[k];
  }

  for (rot_index = 0; rot_index < num_rot; rot_index++) {
    mat_multiply_matrix_vector_i3(rotated_double,
                                  rot_reciprocal[rot_index],
                                  orig_double);
    rot_grid_points[rot_index] =
      kgd_get_dense_grid_point_double_mesh(rotated_double, mesh);
  }
}
/* Same as kpt_get_dense_grid_points_by_rotations, but the rotated     */
/* addresses are resolved through the BZ map, which is indexed on a    */
/* mesh doubled in every direction.                                    */
void
kpt_get_dense_BZ_grid_points_by_rotations(size_t rot_grid_points[],
                                          const int address_orig[3],
                                          SPGCONST int (*rot_reciprocal)[3][3],
                                          const int num_rot,
                                          const int mesh[3],
                                          const int is_shift[3],
                                          const size_t bz_map[])
{
  int k, rot_index;
  int orig_double[3], rotated_double[3], doubled_mesh[3];

  for (k = 0; k < 3; k++) {
    doubled_mesh[k] = 2 * mesh[k];
    orig_double[k] = 2 * address_orig[k] + is_shift[k];
  }

  for (rot_index = 0; rot_index < num_rot; rot_index++) {
    mat_multiply_matrix_vector_i3(rotated_double,
                                  rot_reciprocal[rot_index],
                                  orig_double);
    rot_grid_points[rot_index] =
      bz_map[kgd_get_dense_grid_point_double_mesh(rotated_double,
                                                  doubled_mesh)];
  }
}
/* Copy-based wrapper around kpt_relocate_dense_BZ_grid_address        */
/* exposing an int-typed bz_map; entries with no BZ grid point are -1. */
/* Returns the total number of BZ grid points, or 0 on failure.        */
int kpt_relocate_BZ_grid_address(int bz_grid_address[][3],
                                 int bz_map[],
                                 SPGCONST int grid_address[][3],
                                 const int mesh[3],
                                 SPGCONST double rec_lattice[3][3],
                                 const int is_shift[3])
{
  int num_bzgp;
  size_t i, num_bz_map;
  size_t *dense_bz_map;

  /* size_t arithmetic: 8 * mesh[0]*mesh[1]*mesh[2] overflows int for  */
  /* dense meshes; the loop index was also a signed int compared       */
  /* against this bound.                                               */
  num_bz_map = (size_t)mesh[0] * mesh[1] * mesh[2] * 8;

  if ((dense_bz_map =
       (size_t*)malloc(sizeof(size_t) * num_bz_map)) == NULL) {
    /* Message previously named "unique_rot" (copy-paste); report the  */
    /* buffer actually being allocated.                                */
    warning_print("spglib: Memory of dense_bz_map could not be allocated.");
    return 0;
  }

  num_bzgp = kpt_relocate_dense_BZ_grid_address(bz_grid_address,
                                                dense_bz_map,
                                                grid_address,
                                                mesh,
                                                rec_lattice,
                                                is_shift);

  for (i = 0; i < num_bz_map; i++) {
    if (dense_bz_map[i] == num_bz_map) {
      bz_map[i] = -1;   /* sentinel: no BZ grid point maps here */
    } else {
      bz_map[i] = (int)dense_bz_map[i];
    }
  }

  free(dense_bz_map);
  dense_bz_map = NULL;

  return num_bzgp;
}
/* Public wrapper over the static BZ relocation routine. */
size_t kpt_relocate_dense_BZ_grid_address(int bz_grid_address[][3],
                                          size_t bz_map[],
                                          SPGCONST int grid_address[][3],
                                          const int mesh[3],
                                          SPGCONST double rec_lattice[3][3],
                                          const int is_shift[3])
{
  size_t num_total_gp;

  num_total_gp = relocate_dense_BZ_grid_address(bz_grid_address,
                                                bz_map,
                                                grid_address,
                                                mesh,
                                                rec_lattice,
                                                is_shift);
  return num_total_gp;
}
/* Public wrapper over the static reciprocal point-group builder.      */
/* Returns NULL on allocation failure.                                 */
MatINT *kpt_get_point_group_reciprocal(const MatINT * rotations,
                                       const int is_time_reversal)
{
  MatINT *pointgroup;

  pointgroup = get_point_group_reciprocal(rotations, is_time_reversal);
  return pointgroup;
}
/* Public wrapper: restrict a reciprocal point group to the operations */
/* stabilizing the given q-points. Returns NULL on allocation failure. */
MatINT *kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
                                              const double symprec,
                                              const size_t num_q,
                                              SPGCONST double qpoints[][3])
{
  MatINT *stabilizer_group;

  stabilizer_group = get_point_group_reciprocal_with_q(rot_reciprocal,
                                                       symprec,
                                                       num_q,
                                                       qpoints);
  return stabilizer_group;
}
/* Build the reciprocal-space point group: transpose each real-space   */
/* rotation, optionally append its product with the inversion (time    */
/* reversal), then keep only the distinct matrices.                    */
/* Return NULL if failed */
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
                                          const int is_time_reversal)
{
  int i, j, num_unique, already_seen;
  MatINT *candidates, *result;
  int *unique_index;
  SPGCONST int inversion[3][3] = {
    {-1, 0, 0 },
    { 0,-1, 0 },
    { 0, 0,-1 }
  };

  candidates = NULL;
  result = NULL;
  unique_index = NULL;

  /* With time reversal every rotation contributes both R^T and -R^T. */
  candidates = mat_alloc_MatINT(is_time_reversal ? rotations->size * 2
                                                 : rotations->size);
  if (candidates == NULL) {
    return NULL;
  }

  if ((unique_index = (int*)malloc(sizeof(int) * candidates->size)) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    mat_free_MatINT(candidates);
    candidates = NULL;
    return NULL;
  }

  for (i = 0; i < candidates->size; i++) {
    unique_index[i] = -1;
  }

  for (i = 0; i < rotations->size; i++) {
    mat_transpose_matrix_i3(candidates->mat[i], rotations->mat[i]);
    if (is_time_reversal) {
      mat_multiply_matrix_i3(candidates->mat[rotations->size + i],
                             inversion,
                             candidates->mat[i]);
    }
  }

  /* Deduplicate: record the index of the first occurrence of each     */
  /* distinct matrix.                                                  */
  num_unique = 0;
  for (i = 0; i < candidates->size; i++) {
    already_seen = 0;
    for (j = 0; j < num_unique; j++) {
      if (mat_check_identity_matrix_i3(candidates->mat[unique_index[j]],
                                       candidates->mat[i])) {
        already_seen = 1;
        break;
      }
    }
    if (!already_seen) {
      unique_index[num_unique] = i;
      num_unique++;
    }
  }

  if ((result = mat_alloc_MatINT(num_unique)) != NULL) {
    for (i = 0; i < num_unique; i++) {
      mat_copy_matrix_i3(result->mat[i], candidates->mat[unique_index[i]]);
    }
  }

  free(unique_index);
  unique_index = NULL;
  mat_free_MatINT(candidates);
  candidates = NULL;

  return result;
}
/* Restrict the reciprocal point group to the operations that map the  */
/* given set of q-points onto itself, modulo reciprocal lattice        */
/* translations, within symprec.                                       */
/* Return NULL if failed */
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
                                                 const double symprec,
                                                 const size_t num_q,
                                                 SPGCONST double qpoints[][3])
{
  int i, j, k, l, is_all_ok, num_rot;
  int *ir_rot;               /* indices of the accepted rotations */
  double q_rot[3], diff[3];
  MatINT * rot_reciprocal_q;

  ir_rot = NULL;
  rot_reciprocal_q = NULL;
  is_all_ok = 0;
  num_rot = 0;

  if ((ir_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) {
    warning_print("spglib: Memory of ir_rot could not be allocated.");
    return NULL;
  }

  for (i = 0; i < rot_reciprocal->size; i++) {
    ir_rot[i] = -1;
  }
  /* Rotation i is accepted only if EVERY rotated q-point matches some */
  /* member of the q-point list.                                       */
  for (i = 0; i < rot_reciprocal->size; i++) {
    for (j = 0; j < num_q; j++) {
      is_all_ok = 0;
      mat_multiply_matrix_vector_id3(q_rot,
                                     rot_reciprocal->mat[i],
                                     qpoints[j]);
      for (k = 0; k < num_q; k++) {
        for (l = 0; l < 3; l++) {
          /* Fold the difference to the nearest lattice translation. */
          diff[l] = q_rot[l] - qpoints[k][l];
          diff[l] -= mat_Nint(diff[l]);
        }
        if (mat_Dabs(diff[0]) < symprec &&
            mat_Dabs(diff[1]) < symprec &&
            mat_Dabs(diff[2]) < symprec) {
          is_all_ok = 1;
          break;
        }
      }
      if (! is_all_ok) {
        break;    /* q-point j has no image in the list: reject rotation i */
      }
    }
    if (is_all_ok) {
      ir_rot[num_rot] = i;
      num_rot++;
    }
  }
  /* NOTE(review): with num_q == 0 the inner loops never run, so       */
  /* is_all_ok stays 0 and NO rotation is accepted -- confirm callers  */
  /* never pass an empty q-point list.                                 */
  if ((rot_reciprocal_q = mat_alloc_MatINT(num_rot)) != NULL) {
    for (i = 0; i < num_rot; i++) {
      mat_copy_matrix_i3(rot_reciprocal_q->mat[i],
                         rot_reciprocal->mat[ir_rot[i]]);
    }
  }

  free(ir_rot);
  ir_rot = NULL;

  return rot_reciprocal_q;
}
/* Dispatch to the appropriate mesh-reduction algorithm: if the mesh   */
/* numbers/shifts are incompatible with the rotational symmetry, use   */
/* the distortion-tolerant variant; otherwise the normal one.          */
static size_t get_dense_ir_reciprocal_mesh(int grid_address[][3],
                                           size_t ir_mapping_table[],
                                           const int mesh[3],
                                           const int is_shift[3],
                                           const MatINT *rot_reciprocal)
{
  if (!check_mesh_symmetry(mesh, is_shift, rot_reciprocal)) {
    return get_dense_ir_reciprocal_mesh_distortion(grid_address,
                                                   ir_mapping_table,
                                                   mesh,
                                                   is_shift,
                                                   rot_reciprocal);
  }
  return get_dense_ir_reciprocal_mesh_normal(grid_address,
                                             ir_mapping_table,
                                             mesh,
                                             is_shift,
                                             rot_reciprocal);
}
/* Map every mesh point to the smallest grid index reachable from it   */
/* by any reciprocal-space rotation; points mapping to themselves are  */
/* the irreducible representatives.                                    */
static size_t get_dense_ir_reciprocal_mesh_normal(int grid_address[][3],
                                                  size_t ir_mapping_table[],
                                                  const int mesh[3],
                                                  const int is_shift[3],
                                                  const MatINT *rot_reciprocal)
{
  /* In the following loop, mesh is doubled. */
  /* Even and odd mesh numbers correspond to */
  /* is_shift[i] are 0 or 1, respectively. */
  /* is_shift = [0,0,0] gives Gamma center mesh. */
  /* grid: reducible grid points */
  /* ir_mapping_table: the mapping from each point to ir-point. */
  long i;
  size_t grid_point_rot;
  int j;
  int address_double[3], address_double_rot[3];

  kgd_get_all_grid_addresses(grid_address, mesh);

#pragma omp parallel for private(j, grid_point_rot, address_double, address_double_rot)
  for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
    kgd_get_grid_address_double_mesh(address_double,
                                     grid_address[i],
                                     mesh,
                                     is_shift);
    /* Start with the identity mapping, then lower toward the minimum */
    /* rotated image.                                                 */
    ir_mapping_table[i] = i;
    for (j = 0; j < rot_reciprocal->size; j++) {
      mat_multiply_matrix_vector_i3(address_double_rot,
                                    rot_reciprocal->mat[j],
                                    address_double);
      grid_point_rot = kgd_get_dense_grid_point_double_mesh(address_double_rot, mesh);
      if (grid_point_rot < ir_mapping_table[i]) {
#ifdef _OPENMP
        /* Other iterations may still be running, so only the raw     */
        /* rotated index is stored; chains are collapsed afterwards   */
        /* in get_dense_num_ir.                                       */
        ir_mapping_table[i] = grid_point_rot;
#else
        /* Serial run: smaller indices are already final, so follow   */
        /* their mapping directly and stop searching.                 */
        ir_mapping_table[i] = ir_mapping_table[grid_point_rot];
        break;
#endif
      }
    }
  }

  return get_dense_num_ir(ir_mapping_table, mesh);
}
/* Same reduction as get_dense_ir_reciprocal_mesh_normal, but robust   */
/* for meshes that break the rotational symmetry: rotated addresses    */
/* are computed over a common denominator and discarded when they do   */
/* not land on a mesh point of the requested shift.                    */
static size_t
get_dense_ir_reciprocal_mesh_distortion(int grid_address[][3],
                                        size_t ir_mapping_table[],
                                        const int mesh[3],
                                        const int is_shift[3],
                                        const MatINT *rot_reciprocal)
{
  long i;
  size_t grid_point_rot;
  int j, k, indivisible;
  int address_double[3], address_double_rot[3];
  long long_address_double[3], long_address_double_rot[3], divisor[3];

  /* divisor, long_address_double, and long_address_double_rot have */
  /* long integer type to treat dense mesh. */

  kgd_get_all_grid_addresses(grid_address, mesh);

  /* divisor[j] = product of the other two mesh dimensions, putting   */
  /* all components over the common denominator mesh[0]*mesh[1]*mesh[2]. */
  for (j = 0; j < 3; j++) {
    divisor[j] = mesh[(j + 1) % 3] * mesh[(j + 2) % 3];
  }

#pragma omp parallel for private(j, k, grid_point_rot, address_double, address_double_rot, long_address_double, long_address_double_rot)
  for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
    kgd_get_grid_address_double_mesh(address_double,
                                     grid_address[i],
                                     mesh,
                                     is_shift);
    for (j = 0; j < 3; j++) {
      long_address_double[j] = address_double[j] * divisor[j];
    }
    ir_mapping_table[i] = i;
    for (j = 0; j < rot_reciprocal->size; j++) {

      /* Equivalent to mat_multiply_matrix_vector_i3 except for data type */
      for (k = 0; k < 3; k++) {
        long_address_double_rot[k] =
          rot_reciprocal->mat[j][k][0] * long_address_double[0] +
          rot_reciprocal->mat[j][k][1] * long_address_double[1] +
          rot_reciprocal->mat[j][k][2] * long_address_double[2];
      }

      /* The rotated point is accepted only if it divides back exactly */
      /* and its parity matches the requested half-grid shift.         */
      for (k = 0; k < 3; k++) {
        indivisible = long_address_double_rot[k] % divisor[k];
        if (indivisible) {break;}
        address_double_rot[k] = long_address_double_rot[k] / divisor[k];
        if ((address_double_rot[k] % 2 != 0 && is_shift[k] == 0) ||
            (address_double_rot[k] % 2 == 0 && is_shift[k] == 1)) {
          indivisible = 1;
          break;
        }
      }
      if (indivisible) {continue;}
      grid_point_rot =
        kgd_get_dense_grid_point_double_mesh(address_double_rot, mesh);
      if (grid_point_rot < ir_mapping_table[i]) {
#ifdef _OPENMP
        /* Parallel run: store the raw rotated index; chains are      */
        /* collapsed later in get_dense_num_ir.                       */
        ir_mapping_table[i] = grid_point_rot;
#else
        ir_mapping_table[i] = ir_mapping_table[grid_point_rot];
        break;
#endif
      }
    }
  }

  return get_dense_num_ir(ir_mapping_table, mesh);
}
/* Count the irreducible points (those mapping to themselves) and,     */
/* after an OpenMP reduction, collapse the mapping chains left by the  */
/* parallel passes in get_dense_ir_reciprocal_mesh_*.                  */
static size_t get_dense_num_ir(size_t ir_mapping_table[], const int mesh[3])
{
  long i;
  size_t num_ir;

  num_ir = 0;

#pragma omp parallel for reduction(+:num_ir)
  for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
    if (ir_mapping_table[i] == i) {
      num_ir++;
    }
  }

#ifdef _OPENMP
  /* The parallel reductions store raw rotated indices, which may form */
  /* indirection chains. NOTE(review): a single collapsing pass        */
  /* assumes chains are at most one level deep -- confirm.             */
  for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
    ir_mapping_table[i] = ir_mapping_table[ir_mapping_table[i]];
  }
#endif

  return num_ir;
}
/* Fold every grid point into the first Brillouin zone by searching    */
/* the 125 neighboring reciprocal cells for the translate closest to   */
/* Gamma. Translates tied within tolerance (BZ-boundary copies) are    */
/* appended after the first total_num_gp entries of bz_grid_address.   */
/* Returns the total number of stored BZ grid points.                  */
static size_t relocate_dense_BZ_grid_address(int bz_grid_address[][3],
                                             size_t bz_map[],
                                             SPGCONST int grid_address[][3],
                                             const int mesh[3],
                                             SPGCONST double rec_lattice[3][3],
                                             const int is_shift[3])
{
  double tolerance, min_distance;
  double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE];
  int bzmesh[3], bz_address_double[3];
  size_t i, boundary_num_gp, total_num_gp, bzgp, gp, num_bzmesh;
  int j, k, min_index;

  tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh);
  for (j = 0; j < 3; j++) {
    bzmesh[j] = mesh[j] * 2;
  }

  /* Initialize bz_map with the out-of-range sentinel num_bzmesh,      */
  /* meaning "no BZ grid point here".                                  */
  num_bzmesh = bzmesh[0] * bzmesh[1] * (size_t)(bzmesh[2]);
  for (i = 0; i < num_bzmesh; i++) {
    bz_map[i] = num_bzmesh;
  }

  boundary_num_gp = 0;
  total_num_gp = mesh[0] * mesh[1] * (size_t)(mesh[2]);

  /* Multithreading doesn't work for this loop since gp calculated */
  /* with boundary_num_gp is unstable to store bz_grid_address. */
  for (i = 0; i < total_num_gp; i++) {
    /* Squared Cartesian distance from Gamma of this grid point        */
    /* translated into each of the 125 neighboring cells.              */
    for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
      for (k = 0; k < 3; k++) {
        q_vector[k] =
          ((grid_address[i][k] + bz_search_space[j][k] * mesh[k]) * 2 +
           is_shift[k]) / ((double)mesh[k]) / 2;
      }
      mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector);
      distance[j] = mat_norm_squared_d3(q_vector);
    }
    min_distance = distance[0];
    min_index = 0;
    for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
      if (distance[j] < min_distance) {
        min_distance = distance[j];
        min_index = j;
      }
    }

    /* The closest translate is stored at index i; every other         */
    /* translate within tolerance is appended after total_num_gp.      */
    for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
      if (distance[j] < min_distance + tolerance) {
        if (j == min_index) {
          gp = i;
        } else {
          gp = boundary_num_gp + total_num_gp;
        }
        for (k = 0; k < 3; k++) {
          bz_grid_address[gp][k] =
            grid_address[i][k] + bz_search_space[j][k] * mesh[k];
          bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k];
        }
        bzgp = kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh);
        bz_map[bzgp] = gp;
        if (j != min_index) {
          boundary_num_gp++;
        }
      }
    }
  }

  return boundary_num_gp + total_num_gp;
}
/* Tolerance for deciding BZ-boundary ties: 1% of the largest squared  */
/* mesh step among the three reciprocal lattice directions.            */
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
                                             const int mesh[3])
{
  int col, row;
  double largest;
  double length[3];

  /* Squared length of each reciprocal basis vector divided by its     */
  /* mesh subdivision squared: the squared size of one mesh step.      */
  for (col = 0; col < 3; col++) {
    length[col] = 0;
    for (row = 0; row < 3; row++) {
      length[col] += rec_lattice[row][col] * rec_lattice[row][col];
    }
    length[col] /= mesh[col] * mesh[col];
  }

  largest = length[0];
  for (col = 1; col < 3; col++) {
    if (largest < length[col]) {
      largest = length[col];
    }
  }

  return largest * 0.01;
}
/* Decide whether the mesh/shift is compatible with the point group:   */
/* returns 0 if any rotation has |elements| summing above 3 (3- or     */
/* 6-fold axes, non-conventional cells), or if a rotation exchanges    */
/* two axes whose mesh numbers or shifts differ. Non-zero means the    */
/* fast reduction algorithm may be used.                               */
static int check_mesh_symmetry(const int mesh[3],
                               const int is_shift[3],
                               const MatINT *rot_reciprocal)
{
  int i, j, k, sum;
  int eq[3];

  eq[0] = 0; /* a=b */
  eq[1] = 0; /* b=c */
  eq[2] = 0; /* c=a */

  /* Check 3 and 6 fold rotations and non-convensional choice of unit cells */
  for (i = 0; i < rot_reciprocal->size; i++) {
    sum = 0;
    for (j = 0; j < 3; j++) {
      for (k = 0; k < 3; k++) {
        sum += abs(rot_reciprocal->mat[i][j][k]);
      }
    }
    if (sum > 3) {
      return 0;
    }
  }

  for (i = 0; i < rot_reciprocal->size; i++) {
    /* First column (0,1,0): a-axis mapped onto the b-axis -> a=b. */
    if (rot_reciprocal->mat[i][0][0] == 0 &&
        rot_reciprocal->mat[i][1][0] == 1 &&
        rot_reciprocal->mat[i][2][0] == 0) {eq[0] = 1;}
    /* Second column (0,0,1): b-axis mapped onto the c-axis -> b=c.    */
    /* BUG FIX: this test previously duplicated the a=b test above     */
    /* (it re-examined the first column), so b=c equivalence was       */
    /* detected from the wrong matrix elements.                        */
    if (rot_reciprocal->mat[i][0][1] == 0 &&
        rot_reciprocal->mat[i][1][1] == 0 &&
        rot_reciprocal->mat[i][2][1] == 1) {eq[1] = 1;}
    /* First column (0,0,1): a-axis mapped onto the c-axis -> c=a. */
    if (rot_reciprocal->mat[i][0][0] == 0 &&
        rot_reciprocal->mat[i][1][0] == 0 &&
        rot_reciprocal->mat[i][2][0] == 1) {eq[2] = 1;}
  }

  /* Each detected axis equivalence must be matched by equal mesh      */
  /* numbers and equal shifts along the two axes.                      */
  return (((eq[0] && mesh[0] == mesh[1] && is_shift[0] == is_shift[1]) || (!eq[0])) &&
          ((eq[1] && mesh[1] == mesh[2] && is_shift[1] == is_shift[2]) || (!eq[1])) &&
          ((eq[2] && mesh[2] == mesh[0] && is_shift[2] == is_shift[0]) || (!eq[2])));
}
|
ast-dump-openmp-target-teams-distribute.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp target teams distribute
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp target teams distribute
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp target teams distribute collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp target teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp target teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:4:1, col:36>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:10:1, col:36>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:17:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:24:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeDirective {{.*}} <line:31:1, col:48>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
displacement_field_gradient.c | #include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <float.h>
#include <string.h>
#include <stdbool.h>
#include "disptools.h"
#include "displacement_field_gradient.h"
#include "jacobian.h"
#include "error.h"
/*
 * Central finite differences of the displacement field `f` at voxel (x,y,z).
 * df<a>_d<b> approximates the (a,b) entry of the Jacobian of the TRANSFORM
 * x + f(x): the diagonal macros add 1.0 for the identity part.  `id{x,y,z}`
 * are the precomputed reciprocal spacings (1/dx, 1/dy, 1/dz); the 0.5 factor
 * comes from the two-sided stencil (f[i+1] - f[i-1]) / (2*spacing).
 * `_` is the project's vector-image accessor (see disptools.h); X/Y/Z select
 * the component.  Callers must keep (x,y,z) strictly inside the volume.
 */
#define dfx_dx(f,x,y,z,idx) ((_((f), (x)+1,(y), (z), X) - _((f), (x)-1,(y), (z), X)) * (idx) * .5 + 1.0)
#define dfx_dy(f,x,y,z,idy) ((_((f), (x), (y)+1,(z), X) - _((f), (x), (y)-1,(z), X)) * (idy) * .5)
#define dfx_dz(f,x,y,z,idz) ((_((f), (x), (y), (z)+1, X) - _((f), (x), (y), (z)-1, X)) * (idz) * .5)
#define dfy_dx(f,x,y,z,idx) ((_((f), (x)+1,(y), (z), Y) - _((f), (x)-1,(y), (z), Y)) * (idx) * .5)
#define dfy_dy(f,x,y,z,idy) ((_((f), (x), (y)+1,(z), Y) - _((f), (x), (y)-1,(z), Y)) * (idy) * .5 + 1.0)
#define dfy_dz(f,x,y,z,idz) ((_((f), (x), (y), (z)+1, Y) - _((f), (x), (y), (z)-1, Y)) * (idz) * .5)
#define dfz_dx(f,x,y,z,idx) ((_((f), (x)+1,(y), (z), Z) - _((f), (x)-1,(y), (z), Z)) * (idx) * .5)
#define dfz_dy(f,x,y,z,idy) ((_((f), (x), (y)+1,(z), Z) - _((f), (x), (y)-1,(z), Z)) * (idy) * .5)
#define dfz_dz(f,x,y,z,idz) ((_((f), (x), (y), (z)+1, Z) - _((f), (x), (y), (z)-1, Z)) * (idz) * .5 + 1.0)
/*!
 * \brief Compute the gradient of the loss function.
 *
 * \param f           Current displacement field (read).
 * \param g           Output image receiving the gradient components (written).
 * \param J           Jacobian map of `f` (read).
 * \param g_norm_2    Out: squared Euclidean norm of the gradient.
 * \param voxel_error Per-voxel error term of the current iterate (read).
 * \param delta       Jacobian regularisation threshold.
 * \param zeta        Jacobian regularisation weight.
 *
 * Boundary voxels are skipped because the finite-difference stencils in
 * dfx_dx et al. read the 6-neighbourhood of each voxel.
 */
static inline void gradient(
    const Image f,
    const Image g,
    const Image J,
    FLOATING *g_norm_2,
    const Image voxel_error,
    const FLOATING delta,
    const FLOATING zeta
    )
{
    // Do not iterate over the voxels on the boundary
    const size_t x_max = f.nx - 1;
    const size_t y_max = f.ny - 1;
    const size_t z_max = f.nz - 1;

    // Precompute the step for finite differences
    const FLOATING idx = 1.0 / f.dx;
    const FLOATING idy = 1.0 / f.dy;
    const FLOATING idz = 1.0 / f.dz;

    // Local variable for the squared norm
    FLOATING squared_norm = 0.0;

    // NOTE: the three nested loops open in one of the two preprocessor
    // branches below and close after the shared body; both branches must
    // keep the same brace structure.
#ifdef __GNUC__
    #pragma omp parallel for reduction(+: squared_norm) collapse(3) schedule(static)
    for (size_t z = 1; z < z_max; ++z) {
    for (size_t y = 1; y < y_max; ++y) {
    for (size_t x = 1; x < x_max; ++x) {
#else // MSVC 15 does not support OpenMP > 2.0
    int z;
    #pragma omp parallel for reduction(+: squared_norm)
    for (z = 1; z < z_max; ++z) {
    for (size_t y = 1; y < y_max; ++y) {
    for (size_t x = 1; x < x_max; ++x) {
#endif
        // Actual cost: error terms of the six face-neighbours, which are
        // the only voxels whose Jacobian depends on f at (x,y,z).
        FLOATING error_xb = __(voxel_error, x-1, y, z);
        FLOATING error_xf = __(voxel_error, x+1, y, z);
        FLOATING error_yb = __(voxel_error, x, y-1, z);
        FLOATING error_yf = __(voxel_error, x, y+1, z);
        FLOATING error_zb = __(voxel_error, x, y, z-1);
        FLOATING error_zf = __(voxel_error, x, y, z+1);

        // Regularisation terms: penalise neighbours whose Jacobian
        // determinant fell below the threshold `delta`.
        if (__(J, x-1, y, z) < delta) {
            error_xb += zeta * (__(J, x-1, y, z) - delta);
        }
        if (__(J, x+1, y, z) < delta) {
            error_xf += zeta * (__(J, x+1, y, z) - delta);
        }
        if (__(J, x, y-1, z) < delta) {
            error_yb += zeta * (__(J, x, y-1, z) - delta);
        }
        if (__(J, x, y+1, z) < delta) {
            error_yf += zeta * (__(J, x, y+1, z) - delta);
        }
        if (__(J, x, y, z-1) < delta) {
            error_zb += zeta * (__(J, x, y, z-1) - delta);
        }
        if (__(J, x, y, z+1) < delta) {
            error_zf += zeta * (__(J, x, y, z+1) - delta);
        }

        // dc/dx: cofactor expansion of the Jacobian determinant w.r.t.
        // the x-component of f at this voxel.
        _(g, x, y, z, X) =
            (+idx * (+error_xb * (dfy_dy(f, x-1, y, z, idy) * dfz_dz(f, x-1, y, z, idz) -
                                  dfy_dz(f, x-1, y, z, idz) * dfz_dy(f, x-1, y, z, idy))
                     -error_xf * (dfy_dy(f, x+1, y, z, idy) * dfz_dz(f, x+1, y, z, idz) -
                                  dfy_dz(f, x+1, y, z, idz) * dfz_dy(f, x+1, y, z, idy)))
             -idy * (+error_yb * (dfy_dx(f, x, y-1, z, idx) * dfz_dz(f, x, y-1, z, idz) -
                                  dfy_dz(f, x, y-1, z, idz) * dfz_dx(f, x, y-1, z, idx))
                     -error_yf * (dfy_dx(f, x, y+1, z, idx) * dfz_dz(f, x, y+1, z, idz) -
                                  dfy_dz(f, x, y+1, z, idz) * dfz_dx(f, x, y+1, z, idx)))
             +idz * (+error_zb * (dfy_dx(f, x, y, z-1, idx) * dfz_dy(f, x, y, z-1, idy) -
                                  dfy_dy(f, x, y, z-1, idy) * dfz_dx(f, x, y, z-1, idx))
                     -error_zf * (dfy_dx(f, x, y, z+1, idx) * dfz_dy(f, x, y, z+1, idy) -
                                  dfy_dy(f, x, y, z+1, idy) * dfz_dx(f, x, y, z+1, idx))));

        // dc/dy
        _(g, x, y, z, Y) =
            (-idx * (+error_xb * (dfx_dy(f, x-1, y, z, idy) * dfz_dz(f, x-1, y, z, idz) -
                                  dfx_dz(f, x-1, y, z, idz) * dfz_dy(f, x-1, y, z, idy))
                     -error_xf * (dfx_dy(f, x+1, y, z, idy) * dfz_dz(f, x+1, y, z, idz) -
                                  dfx_dz(f, x+1, y, z, idz) * dfz_dy(f, x+1, y, z, idy)))
             +idy * (+error_yb * (dfx_dx(f, x, y-1, z, idx) * dfz_dz(f, x, y-1, z, idz) -
                                  dfx_dz(f, x, y-1, z, idz) * dfz_dx(f, x, y-1, z, idx))
                     -error_yf * (dfx_dx(f, x, y+1, z, idx) * dfz_dz(f, x, y+1, z, idz) -
                                  dfx_dz(f, x, y+1, z, idz) * dfz_dx(f, x, y+1, z, idx)))
             -idz * (+error_zb * (dfx_dx(f, x, y, z-1, idx) * dfz_dy(f, x, y, z-1, idy) -
                                  dfx_dy(f, x, y, z-1, idy) * dfz_dx(f, x, y, z-1, idx))
                     -error_zf * (dfx_dx(f, x, y, z+1, idx) * dfz_dy(f, x, y, z+1, idy) -
                                  dfx_dy(f, x, y, z+1, idy) * dfz_dx(f, x, y, z+1, idx))));

        // dc/dz
        _(g, x, y, z, Z) =
            (+idx * (+error_xb * (dfx_dy(f, x-1, y, z, idy) * dfy_dz(f, x-1, y, z, idz) -
                                  dfx_dz(f, x-1, y, z, idz) * dfy_dy(f, x-1, y, z, idy))
                     -error_xf * (dfx_dy(f, x+1, y, z, idy) * dfy_dz(f, x+1, y, z, idz) -
                                  dfx_dz(f, x+1, y, z, idz) * dfy_dy(f, x+1, y, z, idy)))
             -idy * (+error_yb * (dfx_dx(f, x, y-1, z, idx) * dfy_dz(f, x, y-1, z, idz) -
                                  dfx_dz(f, x, y-1, z, idz) * dfy_dx(f, x, y-1, z, idx))
                     -error_yf * (dfx_dx(f, x, y+1, z, idx) * dfy_dz(f, x, y+1, z, idz) -
                                  dfx_dz(f, x, y+1, z, idz) * dfy_dx(f, x, y+1, z, idx)))
             +idz * (+error_zb * (dfx_dx(f, x, y, z-1, idx) * dfy_dy(f, x, y, z-1, idy) -
                                  dfx_dy(f, x, y, z-1, idy) * dfy_dx(f, x, y, z-1, idx))
                     -error_zf * (dfx_dx(f, x, y, z+1, idx) * dfy_dy(f, x, y, z+1, idy) -
                                  dfx_dy(f, x, y, z+1, idy) * dfy_dx(f, x, y, z+1, idx))));

        // Accumulate (dc/dx)^2 + (dc/dy)^2 + (dc/dz)^2 into the OpenMP
        // reduction variable.
        squared_norm += _(g, x, y, z, X) * _(g, x, y, z, X) +
                        _(g, x, y, z, Y) * _(g, x, y, z, Y) +
                        _(g, x, y, z, Z) * _(g, x, y, z, Z);
    }
    }
    }

    // Return squared norm
    *g_norm_2 = squared_norm;
}
/*!
 * \brief Move `old_field` along direction `g` with step size `eta`.
 *
 * \param old_field Source displacement field (read).
 * \param new_field Destination field, written voxel-by-voxel; must have the
 *                  same dimensions as `old_field` and `g`.
 * \param g         Gradient / search direction (read).
 * \param eta       Step length; the update is a gradient-descent step
 *                  new = old - eta * g on each vector component.
 */
static inline void move_field(
    const Image old_field,
    const Image new_field,
    const Image g,
    const FLOATING eta
    )
{
    // The loop braces open inside one of the preprocessor branches and
    // close after the shared body.
#ifdef __GNUC__
    #pragma omp parallel for collapse(3) schedule(static)
    for (size_t z = 0; z < new_field.nz; ++z) {
    for (size_t y = 0; y < new_field.ny; ++y) {
    for (size_t x = 0; x < new_field.nx; ++x) {
#else // MSVC 15 does not support OpenMP > 2.0
    int z;
    #pragma omp parallel for
    for (z = 0; z < new_field.nz; ++z) {
    for (size_t y = 0; y < new_field.ny; ++y) {
    for (size_t x = 0; x < new_field.nx; ++x) {
#endif
        // Independent per-voxel update of the three vector components.
        _(new_field, x, y, z, X) = _(old_field, x, y, z, X) - eta * _(g, x, y, z, X);
        _(new_field, x, y, z, Y) = _(old_field, x, y, z, Y) - eta * _(g, x, y, z, Y);
        _(new_field, x, y, z, Z) = _(old_field, x, y, z, Z) - eta * _(g, x, y, z, Z);
    }
    }
    }
}
/*!
 * \brief Find a displacement field that realises the given Jacobian.
 *
 * Employ a greedy search, starting from an initial guess of the
 * displacement field (passed in the `field' argument). At each
 * iteration, compute the Jacobian of the current displacement field,
 * then correct the components of the field on each voxel.
 *
 * Use two couples of buffers, to store a copy of the displacement field
 * and its Jacobian at the current iteration, before and after the
 * correction. If the correction improves the result, then switch the
 * buffers and proceed with the next iteration, otherwise keep the
 * current displacement field.
 */
void generate_displacement_gradient(
        const size_t nx,          /*!< Width of the image */
        const size_t ny,          /*!< Length of the image */
        const size_t nz,          /*!< Depth of the image */
        const FLOATING dx,        /*!< x spacing */
        const FLOATING dy,        /*!< y spacing */
        const FLOATING dz,        /*!< z spacing */
        const FLOATING *J,        /*!< Target Jacobian */
        const bool *mask,         /*!< Body mask */
        const FLOATING epsilon,   /*!< Tolerance on the Jacobian per voxel */
        const FLOATING tolerance, /*!< Jacobian tolerance on background */
        FLOATING eta,             /*!< Step length for the optimisation */
        const FLOATING eta_max,   /*!< Maximum step length allowed */
        const FLOATING alpha,     /*!< Step length increase coefficient */
        const FLOATING beta,      /*!< Step length decrease coefficient */
        const FLOATING gamma,     /*!< Armijo-Goldstein parameter */
        const FLOATING delta,     /*!< Jacobian regularisation threshold */
        const FLOATING zeta,      /*!< Jacobian regularisation weight */
        const FLOATING theta,     /*!< Termination condition based on improvement */
        const FLOATING iota,      /*!< Termination condition based on eta */
        const bool strict,        /*!< Always improve maximum voxel error */
        const size_t it_max,      /*!< Maximum number of iterations */
        FLOATING *field           /*!< Resulting displacement field */
        )
{
    ASSERT_PARAMETERS;
    disptools_clear_error();

    // Image size
    const size_t voxel_number = nx * ny * nz;
    const size_t image_size = voxel_number * sizeof (FLOATING);

    // Use two buffers that are swapped
    unsigned old_buffer = 0, new_buffer = 1;

    // Wrap arrays in data structures
    Image J_ = {1, nx, ny, nz, dx, dy, dz, (FLOATING*) J};
    Mask mask_ = {nx, ny, nz, (bool*) mask};

    // Allocate memory for the Jacobian map of the moving field
    // Use two buffers
    Image J_field_[2] = {
        new_image(3, nx, ny, nz, dx, dy, dz),
        new_image(3, nx, ny, nz, dx, dy, dz),
    };

    // Allocate memory for the moving field
    // Use two buffers
    Image field_[2] = {
        new_image(3, nx, ny, nz, dx, dy, dz),
        new_image(3, nx, ny, nz, dx, dy, dz),
    };

    // Allocate memory for the voxel error term
    Image voxel_error = new_image(1, nx, ny, nz, dx, dy, dz);

    // Allocate memory for the gradient
    Image g = new_image(3, nx, ny, nz, dx, dy, dz);

    // NOTE(review): assumes FLOATING is double; if FLOATING can be float,
    // DBL_MAX does not fit and FLT_MAX should be used instead — confirm.
    FLOATING last_error = DBL_MAX, error = DBL_MAX;
    FLOATING max_voxel_error = DBL_MAX, last_max_voxel_error = DBL_MAX;
    FLOATING g_norm_2 = 0.0;

    // new_image reports failures through the disptools error state;
    // bail out before touching any of the buffers.
    if (disptools_has_error()) {
        goto cleanup;
    }

    // Copy initial guess in the buffer
    memcpy(field_[old_buffer].data, field, 3 * image_size);

    // Compute the error of the initial guess
    jacobian(field_[old_buffer], J_field_[old_buffer]);
    last_error = compute_error(J_,
                               J_field_[old_buffer],
                               mask_,
                               tolerance,
                               voxel_error,
                               &max_voxel_error
                               );

    // Verbose feedback
    // %zu matches the size_t iteration counter (it was %ld, a mismatch).
    verbose_printf(true,
                   "Iteration %5zu: "
                   "total error %6e "
                   "max voxel error %6e "
                   "eta %6e\n",
                   (size_t) 0, last_error, max_voxel_error, eta);

    // Compute gradient
    gradient(field_[old_buffer],
             g,
             J_field_[old_buffer],
             &g_norm_2,
             voxel_error,
             delta,
             zeta
             );

    // Find an high initial eta: grow the step until the Armijo-Goldstein
    // condition fails or eta_max is reached.
    do {
        eta *= alpha;

        // Update the moving displacement field
        move_field(field_[old_buffer],
                   field_[new_buffer],
                   g,
                   eta
                   );

        // Compute the Jacobian map of the moving displacement field
        jacobian(field_[new_buffer], J_field_[new_buffer]);

        // Compute the error of the moving field
        error = compute_error(J_,
                              J_field_[new_buffer],
                              mask_,
                              tolerance,
                              voxel_error,
                              &max_voxel_error
                              );

    // Armijo-Goldstein condition
    } while (eta < eta_max && error - last_error > -gamma * eta * g_norm_2);

    // One alpha in excess from the last iteration, one to compensate
    // for the increment before the first iteration
    eta /= alpha * alpha;

    // Recompute the initial error (voxel_error was overwritten above)
    last_error = compute_error(J_,
                               J_field_[old_buffer],
                               mask_,
                               tolerance,
                               voxel_error,
                               &max_voxel_error
                               );

    size_t it;
    for (it = 1; it <= it_max; ++it) {

        // Compute gradient
        gradient(field_[old_buffer],
                 g,
                 J_field_[old_buffer],
                 &g_norm_2,
                 voxel_error,
                 delta,
                 zeta
                 );

        // Backtracking line search
        eta *= alpha;
        eta = eta > eta_max ? eta_max : eta;
        while (true) {
            // Update the moving displacement field
            move_field(field_[old_buffer],
                       field_[new_buffer],
                       g,
                       eta
                       );

            // Compute the Jacobian map of the moving displacement field
            jacobian(field_[new_buffer], J_field_[new_buffer]);

            // Compute the error of the moving field
            error = compute_error(J_,
                                  J_field_[new_buffer],
                                  mask_,
                                  tolerance,
                                  voxel_error,
                                  &max_voxel_error
                                  );

            // Armijo-Goldstein condition; in strict mode also require the
            // maximum voxel error not to regress.
            const bool eta_good = eta >= iota;
            const bool ag_condition = error - last_error > -gamma * eta * g_norm_2;
            const bool strict_condition = strict && max_voxel_error > last_max_voxel_error;
            if (eta_good && (ag_condition || strict_condition)) {
                eta *= beta;
            }
            else {
                break;
            }
        }

        // Verbose feedback
        verbose_printf(true,
                       "Iteration %5zu: "
                       "total error %6e "
                       "max voxel error %6e "
                       "eta %6e\n",
                       it, error, max_voxel_error, eta);

        // Stopping conditions
        if (!isnormal(error)) {
            verbose_printf(true, "Terminating: error exploded.\n");
            break;
        }

        if (eta < iota) {
            verbose_printf(true, "Error not decreasing, terminating.\n");
            break;
        }

        if (1.0 - error / last_error < theta) {
            verbose_printf(true, "Error not decreasing, terminating.\n");
            break;
        }

        if (!isnormal(max_voxel_error)) {
            verbose_printf(true, "Terminating: voxel error exploded.\n");
            break;
        }

        if (max_voxel_error < epsilon) {
            verbose_printf(true, "Terminating: reached desired tolerance.\n");
            break;
        }

        // Save error and swap the buffers
        last_error = error;
        last_max_voxel_error = max_voxel_error;
        XOR_SWAP(old_buffer, new_buffer);
    }

    // After a full run of the loop `it` is it_max + 1 (the `it <= it_max`
    // test failed), not it_max: the old `it == it_max` test never fired on
    // exhaustion and fired spuriously on a break in the last iteration.
    verbose_printf(it > it_max, "Terminating: reached maximum number of iterations.\n");

    // Copy result for the caller
    memcpy(field, field_[old_buffer].data, 3 * image_size);

cleanup:
    // Release buffers
    delete_image(&field_[0]);
    delete_image(&field_[1]);
    delete_image(&J_field_[0]);
    delete_image(&J_field_[1]);
    delete_image(&g);
    delete_image(&voxel_error);
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/delegate.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
  Constant declaration: default colors, tile geometry strings, and
  progress-monitor tags shared across MagickCore.
*/
const char
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  DefaultTileFrame[] = "15x15+3+3",
  DefaultTileGeometry[] = "120x120+4+3>",
  DefaultTileLabel[] = "%f\n%G\n%b",
  ForegroundColor[] = "#000",  /* black */
  LoadImageTag[] = "Load/Image",
  LoadImagesTag[] = "Load/Images",
  MatteColor[] = "#bdbdbd",  /* gray */
  PSDensityGeometry[] = "72.0x72.0",
  PSPageGeometry[] = "612x792",
  SaveImageTag[] = "Save/Image",
  SaveImagesTag[] = "Save/Images",
  TransparentColor[] = "#00000000";  /* transparent black */

/* Default image resolution in dots per inch. */
const double
  DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireMagickMemory(sizeof(*image));
  if (image == (Image *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image,0,sizeof(*image));
  /*
    Initialize Image structure: sRGB colorspace, direct class, and the
    standard Rec.709-style chromaticities and D65 white point.
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  image->blur=1.0;
  InitializeExceptionInfo(&image->exception);
  (void) QueryColorDatabase(BackgroundColor,&image->background_color,
    &image->exception);
  (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
  (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
  (void) QueryColorDatabase(TransparentColor,&image->transparent_color,
    &image->exception);
  GetTimerInfo(&image->timer);
  image->ping=MagickFalse;
  image->cache=AcquirePixelCache(0);
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=time((time_t *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AllocateSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(image_info->density,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  (void) SyncImageSettings(image_info,image);
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          /* '>' flag: clamp the delay down to at most rho. */
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /*
              '<' flag: raise the delay up to at least rho.  This branch
              previously assigned image->ticks_per_second (duplicating the
              SigmaValue handling below), so the delay was never updated.
            */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  /*
    Allocate an ImageInfo structure and populate it with default values;
    allocation failure is fatal.
  */
  ImageInfo
    *info;

  info=(ImageInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
  Image
    *next;

  /*
    Append a freshly allocated image to the list; on allocation failure the
    next pointer is left NULL and we return silently.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info);
  next=GetNextImageInList(image);
  if (next == (Image *) NULL)
    return;
  /*
    Inherit the filename (overridden by image_info when available), blob,
    and byte order from the predecessor; link the list both ways.
  */
  (void) CopyMagickString(next->filename,image->filename,MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(next->filename,image_info->filename,
      MaxTextExtent);
  DestroyBlob(next);
  next->blob=ReferenceBlob(image->blob);
  next->endian=image->endian;
  next->scene=image->scene+1;
  next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    matte,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area: a single pass over the list
    accumulates the total width/height (depending on `stack`), the maximum
    depth, whether any frame has an alpha channel, and whether all frames
    share the first frame's colorspace.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  matte=images->matte;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Top-to-bottom: widest column, heights add up. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /* Left-to-right: widths add up, tallest row. */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images: clone the first frame at the combined size, then blit
    each frame into place at (x_offset, y_offset).
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&append_image->exception);
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace);
  append_image->depth=depth;
  append_image->matte=matte;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    /* Gravity shifts the frame inside its slot of the destination. */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      register IndexPacket
        *magick_restrict append_indexes;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      /* A failure in any thread makes the remaining rows no-ops. */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        /* Opaque by default; copy the source opacity only when it has one. */
        SetPixelOpacity(q,OpaqueOpacity);
        if (next->matte != MagickFalse)
          SetPixelOpacity(q,GetPixelOpacity(p));
        if ((next->colorspace == CMYKColorspace) &&
            (append_image->colorspace == CMYKColorspace))
          SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
        p++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /* Advance the paste origin for the next frame. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns UndefinedException if no exceptions are found
% in the image sequence; otherwise it determines the most severe exception
% and reports it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *sequence_exception;

  ExceptionType
    severity;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Gather the most severe exception recorded anywhere in the image
    sequence, report it (warning or error, per severity), then hand the
    severity code back to the caller.
  */
  sequence_exception=AcquireExceptionInfo();
  GetImageException(image,sequence_exception);
  CatchException(sequence_exception);
  severity=sequence_exception->severity;
  sequence_exception=DestroyExceptionInfo(sequence_exception);
  return severity;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image)
{
  /* Convenience wrapper: clip to the first embedded path ("#1"), inside. */
  return ClipImagePath(image,"#1",MagickTrue);
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
#define ClipImagePathTag "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  /*
    Look up the named 8BIM clipping-path property, decode it into a mask
    image, and attach it as the image clip mask.  Returns MagickFalse when
    no such path exists or the mask cannot be materialized.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask);
      if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
        {
          /*
            Bug fix: the decoded mask was previously leaked on this error
            path; release it before bailing out.
          */
          clip_mask=DestroyImage(clip_mask);
          return(MagickFalse);
        }
    }
  /* Outside-path clipping is expressed by negating the mask. */
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse);
  (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  (void) SetImageClipMask(image,clip_mask);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows are 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType orphan,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  double
    scale;
  Image
    *clone_image;
  size_t
    length;
  /*
    Clone the image.  With columns==0 or rows==0 an exact copy (including a
    referenced pixel cache) is returned; otherwise the pixel data of the
    clone is undefined and must be initialized by the caller.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  /* NOTE(review): AcquireCriticalMemory presumably aborts rather than
     returning NULL (no NULL check follows) -- confirm against memory_.h. */
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->channels=image->channels;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  /* Deep-copy attached metadata: profiles, properties, artifacts. */
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  InitializeExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  /*
    A detached clone is unlinked from the image list and gets a fresh blob;
    otherwise the clone shares (references) the parent's I/O blob.
  */
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap (one spare slot is reserved
        beyond image->colors).
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelPacket *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /* Exact copy: clone masks and reference the existing pixel cache. */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /* Same geometry requested: masks still carry over unchanged. */
  if ((columns == image->columns) && (rows == image->rows))
    {
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
    }
  /* Scale page geometry and tile offsets to the requested dimensions. */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows) == MagickFalse)
    {
      /* On resize failure, propagate the reason and destroy the clone. */
      InheritException(exception,&clone_image->exception);
      clone_image=DestroyImage(clone_image);
    }
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;
  /*
    Copy the given image-info structure; a NULL argument yields a freshly
    defaulted structure from AcquireImageInfo().
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /* Scalar option fields. */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /* String fields are deep-copied only when present. */
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  if (image_info->view != (char *) NULL)
    (void) CloneString(&clone_info->view,image_info->view);
  if (image_info->authenticate != (char *) NULL)
    (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /* The pixel cache is reference-counted, not deep-copied. */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene; /* deprecated */
  clone_info->subrange=image_info->number_scenes; /* deprecated */
  clone_info->channel=image_info->channel;
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return the highest severity exception.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag  "Copy/Image"
  CacheView
    *image_view,
    *source_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /* The destination rectangle must lie entirely inside the image. */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels row by row; rows are processed in parallel under
    OpenMP, with `status` shared to record any row failure.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    register const IndexPacket
      *magick_restrict source_indexes;
    register const PixelPacket
      *magick_restrict p;
    register IndexPacket
      *magick_restrict indexes;
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    source_indexes=GetCacheViewVirtualIndexQueue(source_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      /* Whole-packet copy; CMYK images also carry the black channel in
         the index queue. */
      *q=(*p);
      if (image->colorspace == CMYKColorspace)
        indexes[x]=source_indexes[x];
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* Serialize progress reporting across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CopyImagePixels)
#endif
        proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;
  /*
    Dereference image.  Only the holder of the last reference performs the
    actual teardown; earlier callers just get NULL back.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  /* Decrement the reference count under the image lock to stay race-free. */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  (void) ClearExceptionInfo(&image->exception,MagickTrue);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&image->semaphore);
  /* Invalidate the signature so stale pointers trip assertions elsewhere. */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Release every owned member of the image-info structure, then the
    structure itself; always returns NULL for pointer-clearing assignment.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Owned strings: freed only when present. */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(
      image_info->authenticate);
  /* Options, referenced pixel cache, and cloned profile. */
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  /* Invalidate the signature before freeing to catch stale pointers. */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Delegate to the blob layer, which detaches a shared stream. */
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip path associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* No clip mask attached: nothing to hand out. */
  if (image->clip_mask == (Image *) NULL)
    return (Image *) NULL;
  /* Return a private (detached) copy so the caller may modify it freely. */
  return CloneImage(image->clip_mask,0,0,MagickTrue,exception);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *p;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Walk the whole sequence: promote any exception more severe than the
    caller's into `exception`, then clear each per-image severity.
  */
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->exception.severity == UndefinedException)
      continue;
    if (p->exception.severity > exception->severity)
      InheritException(exception,&p->exception);
    p->exception.severity=UndefinedException;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *option;

  ExceptionInfo
    *sans;

  /*
    Reset the structure to well-defined defaults: adjoined output, no
    interlace, default channel mask, antialiasing and dithering enabled.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* Honor the MAGICK_SYNCHRONIZE environment override, when present. */
  option=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (option != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(option);
      option=DestroyString(option);
    }
  /* Resolve the built-in default colors; lookup errors are discarded. */
  sans=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    sans);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,sans);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,sans);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    sans);
  sans=DestroyExceptionInfo(sans);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Plain accessor: the stdio stream attached to the image info, or NULL. */
  return image_info->file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* No mask attached: nothing to hand out. */
  if (image->mask == (Image *) NULL)
    return (Image *) NULL;
  /* Return a private (detached) copy so the caller may modify it freely. */
  return CloneImage(image->mask,0,0,MagickTrue,exception);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannels() returns the number of pixel channels associated with the
% specified image.
%
% The format of the GetImageChannels method is:
%
% size_t GetImageChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Plain accessor: the per-pixel channel count stored on the image. */
  return image->channels;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Snapshot under the image lock so concurrent clone/destroy can't race. */
  LockSemaphoreInfo(image->semaphore);
  count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return count;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* The setting lives in the pixel cache; delegate the lookup there. */
  return GetPixelCacheVirtualMethod(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;
  int
    c;
  MagickBooleanType
    canonical;
  register const char
    *p;
  size_t
    length;
  /*
    Expand "%d"/"%o"/"%x" (with optional zero-padded width) and
    "%[filename:...]" escapes in `format` into `filename`; returns the
    resulting length.  If no escape was expanded, `format` is copied as-is.
  */
  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    /* "%%" is a literal percent; collapsed in the final pass below. */
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    /* Skip an optional numeric width (e.g. "%03d"); the parsed value is
       discarded -- note this local deliberately shadows the parameter. */
    if (*q == '0')
      {
        ssize_t
          value;
        value=(ssize_t) strtol(q,&q,10);
        (void) value;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /* Temporarily terminate at q so p is a self-contained printf spec,
           format the numeric value in place, then restore and re-append
           the remainder of the string. */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t) (MaxTextExtent-
          (p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];
        const char
          *value;
        register char
          *r;
        register ssize_t
          i;
        ssize_t
          depth;
        /*
          Image option.  Extract the bracketed pattern (tracking nested
          brackets); only "filename:..." patterns are honored.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
#if 0
        /* FUTURE: remove this code. -- Anthony 29 April 2012
           Removed as GetMagickProperty() will never match a "filename:"
           string as this is not a 'known' image property.
        */
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          value=GetMagickProperty(image_info,image,pattern);
        else
#endif
        /* Resolution order: image property, image artifact, image option. */
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) &&
            (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        /* Splice the resolved value over the "%[...]" escape in place;
           `length` accumulates the net size of replaced escapes. */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),value,(size_t)
          (MaxTextExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /* Collapse any remaining "%%" pairs into a single '%'. */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Without HDRI support pixel values cannot leave the quantum range, so no
    image qualifies as high dynamic range.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;  /* remains MagickTrue while every inspected pixel is in range */

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /*
        A channel is "HDR" when negative, above QuantumRange, or fractional
        (the value differs from its integer truncation).
      */
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    /* an early break above means an out-of-range component was found */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    status was cleared when an HDR component was seen, hence the inversion.
    NOTE(review): a pixel-fetch failure also clears status and thus reports
    MagickTrue here — presumably acceptable upstream, but worth confirming.
  */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  /*
    Walk the image list; the sequence is a valid set of image objects only
    if every frame carries the MagickCore signature.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  p=image;
  while (p != (Image *) NULL)
  {
    if (p->signature != MagickCoreSignature)
      return(MagickFalse);
    p=GetNextImageInList(p);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  register const Image
    *p;

  /*
    The sequence is tainted when any frame is flagged as altered, or when a
    frame's magick or filename no longer matches that of the first frame.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if ((p->taint != MagickFalse) ||
        (LocaleCompare(p->magick,magick) != 0) ||
        (LocaleCompare(p->filename,filename) != 0))
      return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  /*
    Ensure *image has a single reference, cloning it if it is shared.
    Returns MagickFalse (with details in `exception') if the clone fails;
    in that case *image and its reference count are left untouched.
  */
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);  /* already exclusively owned */
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);  /* do not drop the reference on failure */
  /* release our reference on the shared image under its semaphore */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const MagickPixelPacket *background)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *image;

  ssize_t
    y;

  MagickBooleanType
    status;

  /*
    Create a width x height canvas and flood-fill it with the background
    color; colorspace/matte/fuzz/depth are inherited from `background'.
  */
  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  image=AcquireImage(image_info);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* on failure the canvas is destroyed; per MagickCore convention
     DestroyImage() yields NULL here, so the caller receives NULL */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  /*
    Increment the image's reference count under its semaphore and return
    the same image; pair each call with a DestroyImage().
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Reset the page canvas size and offsets from the geometry string `page';
    only the components present in the string are applied.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* width without height implies a square canvas */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /* AspectValue set (presumably the '!' modifier — see
         ParseAbsoluteGeometry): offsets adjust the current position */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /* absolute offsets; grow an unset canvas to cover the image */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  /*
    Fill every pixel with image->background_color, promoting the image to
    DirectClass (and to RGB / alpha as needed) first.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* a non-gray background cannot be represented in a gray colorspace */
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  /* a translucent background requires an alpha channel */
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  pixel.opacity=OpaqueOpacity;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *magick_restrict indexes;

        /* for CMYK the index queue carries the fourth channel — NOTE
           (review): presumably the K channel; confirm against SetPixelIndex
           usage elsewhere */
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannels() sets the number of pixels channels associated with the
% image.
%
% The format of the SetImageChannels method is:
%
% MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  /*
    Record the number of pixel channels associated with the image.
    Asserts added for consistency with the other image setters in this
    file (e.g. SetImageStorageClass), which validate the image first.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image->channels=channels;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set every pixel of the canvas to `color'; colorspace/matte/fuzz/depth
    are inherited from the color.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  /*
    Assign the storage class (DirectClass or PseudoClass) and resynchronize
    the pixel cache to match the new class.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  /*
    Associate a clone of `clip_mask' with the image (or clear the existing
    mask when clip_mask is NULL).  The mask must match the image size.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (clip_mask != (const Image *) NULL)
    if ((clip_mask->columns != image->columns) ||
        (clip_mask->rows != image->rows))
      ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /* release any previously attached mask before installing the new one */
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows)
{
  /*
    Set the image size (columns x rows) and resynchronize the pixel cache.
    All validation is performed before mutating the image so a failed call
    leaves the previous dimensions intact (the original code assigned
    columns/rows before the depth check).  Asserts added for consistency
    with the other setters in this file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  if (image->depth > (8*sizeof(MagickSizeType)))
    ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
  image->columns=columns;
  image->rows=rows;
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  unsigned char
    magick[2*MaxTextExtent];  /* header bytes; distinct from magic[] above */

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  GetPathComponent(image_info->filename,SubimagePath,subimage);
  if (*subimage != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
        {
          /* not a scene list; treat a geometry (e.g. [100x100]) as extract */
          if (IsGeometry(subimage) != MagickFalse)
            (void) CloneString(&image_info->extract,subimage);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scene numbers and ranges
            (e.g. "2,4-7") into scene/number_scenes bounds.
          */
          (void) CloneString(&image_info->scenes,subimage);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          image_info->number_scenes-=image_info->scene-1;
          image_info->subimage=image_info->scene;
          image_info->subrange=image_info->number_scenes;
        }
    }
  *extension='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* for compressed files (.gz etc.) look at the extension underneath */
  if (*extension != '\0')
    if ((LocaleCompare(extension,"gz") == 0) ||
        (LocaleCompare(extension,"Z") == 0) ||
        (LocaleCompare(extension,"svgz") == 0) ||
        (LocaleCompare(extension,"wmz") == 0))
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*extension != '\0')
    if (LocaleCompare(extension,"bz2") == 0)
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*extension != '\0')
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        /* first-character check is a cheap pre-filter for the full compare */
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      GetPathComponent(image_info->filename,CanonicalPath,filename);
      (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          GetPathComponent(image_info->filename,CanonicalPath,filename);
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to a seekable temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          image_info->temporary=MagickTrue;
        }
      /* read the file header, then rewind so a subsequent read starts clean */
      (void) ResetMagickMemory(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /* header bytes matched a known signature; adopt that format */
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Point the image info at an in-memory blob of `length' bytes; the caller
    retains ownership of the blob storage.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->length=length;
  image_info->blob=(void *) blob;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Record a stdio stream on the image info; coders that honor this member
    will read from / write to the given stream.  Ownership of the FILE
    stays with the caller.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
/*
Associate a clipping mask with the image.  The mask must match the image
dimensions exactly; passing a NULL mask removes any existing one.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
/* ThrowBinaryException records the error and returns MagickFalse. */
if (mask != (const Image *) NULL)
if ((mask->columns != image->columns) || (mask->rows != image->rows))
ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
/* Release any previous mask before installing the new one. */
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
image->mask=NewImageList();
if (mask == (Image *) NULL)
return(MagickTrue);
/* A mask forces DirectClass storage on the owning image. */
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/* Keep a private deep copy; the caller's mask is not retained. */
image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
if (image->mask == (Image *) NULL)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
const Quantum opacity)
{
/*
Set every pixel's opacity channel to a constant level and enable the
matte channel.  0 is fully opaque; QuantumRange is fully transparent.
Returns MagickFalse if any row of the pixel cache could not be accessed.
*/
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
image->matte=MagickTrue;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
/* After a failure, remaining iterations no-op (OpenMP loops may not break). */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelOpacity(q,opacity);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
const VirtualPixelMethod virtual_pixel_method)
{
/*
Delegate to the pixel cache: install the requested virtual-pixel policy
and return the previous setting.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Compute how far the right image can be slid left toward its predecessor:
scan each row for the run of transparent pixels on the facing edges
(right edge of the left image, left edge of the right image) and take the
minimum combined run over all rows.  The returned value is that gap minus
the requested minimum offset.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const PixelPacket
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
/* First image in the list has no left neighbor: nothing to smush against. */
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
/* Count transparent pixels inward from the left image's right edge. */
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
/* Count transparent pixels inward from the right image's left edge. */
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((x+i) >= (ssize_t) gap))
break;
}
/* Shrink the gap to the tightest row seen so far. */
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
/* NOTE(review): the row loop has no break, so y == rows here and this
defensive branch appears unreachable -- confirm before relying on it. */
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
/*
Vertical counterpart of SmushXGap(): compute how far the bottom image can
be slid up toward its predecessor by scanning each column for transparent
runs on the facing edges (bottom edge of the top image, top edge of the
bottom image).  Returns the minimum combined run minus the offset.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*bottom_view,
*top_view;
const Image
*bottom_image,
*top_image;
RectangleInfo
bottom_geometry,
top_geometry;
register const PixelPacket
*p;
register ssize_t
i,
x;
size_t
gap;
ssize_t
y;
/* First image in the list has no neighbor above: nothing to smush against. */
if (images->previous == (Image *) NULL)
return(0);
bottom_image=images;
SetGeometry(smush_image,&bottom_geometry);
GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
bottom_image->gravity,&bottom_geometry);
top_image=images->previous;
SetGeometry(smush_image,&top_geometry);
GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
&top_geometry);
gap=bottom_image->rows;
top_view=AcquireVirtualCacheView(top_image,exception);
bottom_view=AcquireVirtualCacheView(bottom_image,exception);
for (x=0; x < (ssize_t) smush_image->columns; x++)
{
/* Count transparent pixels upward from the top image's bottom edge. */
for (y=(ssize_t) top_image->rows-1; y > 0; y--)
{
p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((top_image->rows-y-1) >= gap))
break;
}
i=(ssize_t) top_image->rows-y-1;
/* Count transparent pixels downward from the bottom image's top edge. */
for (y=0; y < (ssize_t) bottom_image->rows; y++)
{
p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((y+i) >= (ssize_t) gap))
break;
}
/* Shrink the gap to the tightest column seen so far. */
if ((y+i) < (ssize_t) gap)
gap=(size_t) (y+i);
}
bottom_view=DestroyCacheView(bottom_view);
top_view=DestroyCacheView(top_view);
/* NOTE(review): the column loop has no break, so x == columns here and
this defensive branch appears unreachable -- confirm before relying on it. */
if (x < (ssize_t) smush_image->columns)
return(offset);
return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"
/*
Smush the image list into one image: composite each image next to (or
below, when stack is true) its predecessor, pulled together by the
transparent gap measured by SmushXGap()/SmushYGap().  Returns a new
image, or NULL on failure; caller owns the result.
*/
CacheView
*smush_view;
const Image
*image;
Image
*smush_image;
MagickBooleanType
matte,
proceed,
status;
MagickOffsetType
n;
RectangleInfo
geometry;
register const Image
*next;
size_t
height,
number_images,
width;
ssize_t
x_offset,
y_offset;
/*
Compute maximum area of smushed area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
matte=image->matte;
number_images=1;
width=image->columns;
height=image->rows;
next=GetNextImageInList(image);
/* Pass 1: worst-case canvas size (gaps are only subtracted later). */
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->matte != MagickFalse)
matte=MagickTrue;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
if (next->previous != (Image *) NULL)
height+=offset;
continue;
}
width+=next->columns;
if (next->previous != (Image *) NULL)
width+=offset;
if (next->rows > height)
height=next->rows;
}
/*
Smush images.
*/
smush_image=CloneImage(image,width,height,MagickTrue,exception);
if (smush_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
{
InheritException(exception,&smush_image->exception);
smush_image=DestroyImage(smush_image);
return((Image *) NULL);
}
smush_image->matte=matte;
(void) SetImageBackgroundColor(smush_image);
status=MagickTrue;
x_offset=0;
y_offset=0;
smush_view=AcquireVirtualCacheView(smush_image,exception);
/* Pass 2: composite each image, pulled back by the measured gap. */
for (n=0; n < (MagickOffsetType) number_images; n++)
{
SetGeometry(smush_image,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
if (stack != MagickFalse)
{
x_offset-=geometry.x;
y_offset-=SmushYGap(smush_image,image,offset,exception);
}
else
{
x_offset-=SmushXGap(smush_image,image,offset,exception);
y_offset-=geometry.y;
}
/* NOTE(review): status is overwritten each iteration, so only the last
composite's result is checked -- confirm this is intended. */
status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
proceed=SetImageProgress(image,SmushImageTag,n,number_images);
if (proceed == MagickFalse)
break;
if (stack == MagickFalse)
{
x_offset+=(ssize_t) image->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) image->rows;
}
image=GetNextImageInList(image);
}
/* Trim the canvas to the extent actually covered. */
if (stack == MagickFalse)
smush_image->columns=(size_t) x_offset;
else
smush_image->rows=(size_t) y_offset;
smush_view=DestroyCacheView(smush_view);
if (status == MagickFalse)
smush_image=DestroyImage(smush_image);
return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  /*
    Strip an image of every profile and its comment/date properties, and
    ask the PNG encoder to exclude ancillary chunks on write.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /*
    The result reflects whether the PNG artifact could be recorded.
  */
  return(SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date"));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  /*
    Clamp a colormap index: any index beyond image->colors raises the
    range-exception flag and maps to colormap entry 0.
  */
  if (index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((IndexPacket) 0);
    }
  return((IndexPacket) index);
}
MagickExport MagickBooleanType SyncImage(Image *image)
{
/*
Rebuild the RGB (and opacity, for matte images) samples of a PseudoClass
image from its colormap indexes.  Out-of-range indexes are clamped to
entry 0 and reported as a CorruptImageWarning after the sweep.
*/
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelPacket *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
/* Preserve the taint flag: refreshing pixels from the colormap is not a
user-visible modification. */
taint=image->taint;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(range_exception,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
index;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
&range_exception);
/* SetPixelRgb expands to a full statement; the missing semicolon before
"else" is intentional. */
if (image->matte == MagickFalse)
SetPixelRgb(q,image->colormap+(ssize_t) index)
else
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->taint=taint;
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs image_info options into per-image attributes.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  Image
    *current;

  /*
    Apply SyncImageSettings() to every image in the list, then drop the
    one-shot "page" option so it does not leak into later operations.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  current=images;
  while (current != (Image *) NULL)
  {
    (void) SyncImageSettings(image_info,current);
    current=GetNextImageInList(current);
  }
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image)
{
/*
Copy every recognized global option from image_info onto the matching
per-image attribute, then mirror all remaining options as image
artifacts.  Always returns MagickTrue.
*/
char
property[MaxTextExtent];
const char
*option,
*value;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->background_color,
&image->exception);
option=GetImageOption(image_info,"bias");
if (option != (const char *) NULL)
image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
/* A single value applies to both chromaticity coordinates. */
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->border_color,&image->exception);
option=GetImageOption(image_info,"colors");
if (option != (const char *) NULL)
image->colors=StringToUnsignedLong(option);
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
/* NOTE(review): this inner geometry_info shadows the outer declaration;
harmless, since only this branch uses it, but easy to misread. */
GeometryInfo
geometry_info;
/*
Set image density.
*/
flags=ParseGeometry(option,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->matte_color,&image->exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
/* A quality set directly on image_info overrides the "quality" option. */
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->transparent_color,
&image->exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
option=GetImageOption(image_info,"units");
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
else
units = image_info->units;
/* When the unit system changes, rescale the stored resolution between
pixels-per-inch and pixels-per-centimeter. */
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->x_resolution/=2.54;
image->y_resolution/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
/* Round to two decimal places after converting. */
image->x_resolution=(double) ((size_t) (100.0*2.54*
image->x_resolution+0.5))/100.0;
image->y_resolution=(double) ((size_t) (100.0*2.54*
image->y_resolution+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
}
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
/* Mirror every remaining option as an image artifact. */
ResetImageOptionIterator(image_info);
for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
{
value=GetImageOption(image_info,option);
if (value != (const char *) NULL)
{
(void) FormatLocaleString(property,MaxTextExtent,"%s",option);
(void) SetImageArtifact(image,property,value);
}
option=GetNextImageOption(image_info);
}
return(MagickTrue);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for struct timeval values.
 *
 * NOTE: *y is normalized in place as a side effect (this matches the
 * classic glibc elapsed-time example the routine is adapted from).
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds from y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds into y's seconds field. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocate two ping-pong buffers, run the order-1 3D 7-point
 * stencil Nt-1 timesteps TESTS times, and report the best wall time.
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when arguments were missing (UB);
 *    all four are now required.
 *  - initialization started at index 1 and touched only A[0], so the
 *    stencil read uninitialized boundary planes and an uninitialized A[1];
 *    both buffers are now filled over the full domain.
 *  - min_tdiff used lowercase min(), which is not defined (the macro is
 *    MIN); this did not compile.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+2;  /* +2: one ghost cell on each side */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  /* Two time buffers (ping-pong), each Nz x Ny x Nx. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 16;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  /*
   * Initialize BOTH buffers over the FULL domain (indexes 0..N-1): the
   * stencil reads neighbor cells on the boundary planes, and A[1]'s
   * boundary is read from the second timestep onward.
   */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* presumably consumed by PRINT_RESULTS -- defined in print_utils.h */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    (void) ts_return;  /* end >= start, so the subtraction cannot go negative */
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUG FIX: was lowercase min(), which is undefined; the macro is MIN. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (causes performance degradation, left disabled)
  /* for(i=0; i<Nz; i++){
     for(j=0;j<Ny;j++){
       free(A[0][i][j]);
       free(A[1][i][j]);
     }
     free(A[0][i]);
     free(A[1][i]);
   }
   free(A[0]);
   free(A[1]);
  */
  return 0;
}
|
GB_binop__max_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__max_int16
// A.*B function (eWiseMult): GB_AemultB__max_int16
// A*D function (colscale): GB_AxD__max_int16
// D*A function (rowscale): GB_DxB__max_int16
// C+=B function (dense accum): GB_Cdense_accumB__max_int16
// C+=b function (dense accum): GB_Cdense_accumb__max_int16
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__max_int16
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__max_int16
// C=scalar+B GB_bind1st__max_int16
// C=scalar+B' GB_bind1st_tran__max_int16
// C=A+scalar GB_bind2nd__max_int16
// C=A'+scalar GB_bind2nd_tran__max_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT16 || GxB_NO_MAX_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are entirely dense (op: MAX, int16).
// Note this variant returns void and has no GB_DISABLE guard -- the
// template is compiled unconditionally, unlike the noaccum variant below.
void GB_Cdense_ewise3_accum__max_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Overwrites C with the elementwise result; the loops live in the included
// template.  Returns GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), in which case the caller falls back to the generic path.
GrB_Info GB_Cdense_ewise3_noaccum__max_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// kfirst_slice/klast_slice/pstart_slice describe how B's entries were
// partitioned across ntasks tasks (NOTE(review): presumably produced by
// GB_ek_slice, as in GB_AaddB below -- confirm).  The accumulation loop is
// in the included template.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB_Cdense_accumB__max_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulates the scalar b into every entry of the dense matrix C via the
// MAX operator (the loop is in the included template).  p_bwork points to
// the scalar, passed untyped as GB_void* and unboxed to int16_t here.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
//
// Fix: the original had a second, unreachable "return (GrB_SUCCESS) ;"
// after the inner block; the single return below matches the structure of
// GB_Cdense_accumB above.
GrB_Info GB_Cdense_accumb__max_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Combines each entry of A with the matching diagonal entry of D using the
// MAX operator; the loops are in the included template.  Cx is the typed
// view of C->x that the template writes.  The *_is_pattern flags tell the
// template to ignore the values of that operand (structural only).
GrB_Info GB_AxD__max_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Row-scaling counterpart of GB_AxD above: combines each entry of B with
// the matching diagonal entry of D via the MAX operator.  The loops are in
// the included template; Cx is the typed view of C->x that it writes.
GrB_Info GB_DxB__max_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Release all nine slice arrays.  Redefined here so both the template's
// internal error paths and the normal exit below free the same resources.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// Elementwise "add" (set-union pattern) of A and B with the MAX operator,
// optionally under mask M (structural if Mask_struct, complemented if
// Mask_comp).  The algorithm is in GB_add_template.c.  The slice pointers
// below start NULL; NOTE(review): presumably the template allocates them
// on demand -- they are freed via GB_FREE_ALL before returning.
GrB_Info GB_AaddB__max_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Elementwise "multiply" (set-intersection pattern) of A and B with the
// MAX operator, optionally under mask M.  Mirrors GB_AaddB above but uses
// GB_emult_template.c; relies on the GB_FREE_ALL definition preceding
// GB_AaddB to release the slice arrays before returning.
GrB_Info GB_AemultB__max_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For each of the anz entries: Cx [k] = GB_IMAX (x, Bx [k]), where x is the
// scalar bound as the first operand.  Entries whose bitmap bit Bb [k] is
// clear (per the GBB macro) are skipped.  The loop is parallelized with
// OpenMP across nthreads threads; each index is touched by exactly one
// thread, so the (possibly aliased) Cx/Bx arrays are safe to update.
GrB_Info GB_bind1st__max_int16
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    int16_t *Cvals = (int16_t *) Cx_output ;
    const int16_t *Bvals = (const int16_t *) Bx_input ;
    const int16_t scalar = (*((const int16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Bb, k))
        {
            const int16_t b_k = Bvals [k] ;
            Cvals [k] = GB_IMAX (scalar, b_k) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For each of the anz entries: Cx [k] = GB_IMAX (Ax [k], y), where y is the
// scalar bound as the second operand.  Entries whose bitmap bit Ab [k] is
// clear (per the GBB macro) are skipped.  The loop is parallelized with
// OpenMP across nthreads threads; each index is touched by exactly one
// thread, so the (possibly aliased) Cx/Ax arrays are safe to update.
GrB_Info GB_bind2nd__max_int16
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    int16_t *Cvals = (int16_t *) Cx_output ;
    const int16_t *Avals = (const int16_t *) Ax_input ;
    const int16_t scalar = (*((const int16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            const int16_t a_k = Avals [k] ;
            Cvals [k] = GB_IMAX (a_k, scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// Transpose A and apply z = GB_IMAX (x, aij) entrywise, with the scalar x
// bound as the first operand.  The transpose machinery lives in
// GB_unop_transpose.c, driven by the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__max_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows; the definition is the same
// text here because A and B have identical types for this kernel
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// Transpose A and apply z = GB_IMAX (aij, y) entrywise, with the scalar y
// bound as the second operand.  The transpose machinery lives in
// GB_unop_transpose.c, driven by the GB_CAST_OP macro defined just above.
// No GB_ATYPE redefinition is needed here: A is the 1st operand, so the
// file-level GB_ATYPE already applies.
GrB_Info GB_bind2nd_tran__max_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is trail-allocated.
unsigned ResultKind : 2;
/// Kind of Result as defined by APValue::Kind
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64. whether the trail-allocated integer is
/// signed.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64. the BitWidth of the trail-allocated
/// integer. 7 bits because it is the minimal number of bit to represent a
/// value from 0 to 64 (the size of the trail-allocated number).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue. Wether the ASTContext will cleanup the
/// destructor on the trail-allocated APValue.
unsigned HasCleanup : 1;
/// Whether this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
//
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
unsigned Kind : 2;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 14;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
unsigned IsSatisfied : 1;
SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
/// Shared storage for all of the per-class bitfield structs above.
/// Only the member matching this node's dynamic class is meaningful;
/// each subclass reads and writes exactly one member.
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// GNU Extensions.
StmtExprBitfields StmtExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
/// Allocate from the ASTContext's allocator (defined out-of-line).
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
/// Convenience overload taking the context by pointer; forwards to the
/// reference overload above.
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
/// Placement new: returns the caller-provided memory unchanged.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
// Matching operator deletes are no-ops; per C++ rules they are invoked
// only if a constructor throws during the corresponding operator new.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
/// Dereferencing casts the underlying Stmt* down to T*; a null element
/// stays null (cast_or_null).
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Stmt instances are neither default-constructible, copyable, nor movable;
// they live wherever they were allocated.
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
/// Construct a statement of the given class, recording the class in the
/// bitfields and notifying the statistics machinery when enabled.
Stmt(StmtClass SC) {
// Guard rails: the bitfield union must stay within 8 bytes and remain
// pointer-aligned; growing any bitfield struct trips these asserts.
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
/// Return the dynamic class of this statement, as stored in StmtBits.
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
/// Non-const overload; delegates to the const version above and casts
/// the result back to non-const.
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpessions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
/// Const children; implemented by delegating to the non-const overload
/// and re-wrapping the iterators in a const range.
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
// Convenience accessors over children().
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
/// The group of declarations this statement carries.
DeclGroupRef DG;
/// Source range of the statement (see getBeginLoc/getEndLoc).
SourceLocation StartLoc, EndLoc;
public:
/// Build a declaration statement for the given declaration group and
/// source range.
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions. Children are produced by StmtIterator
// over the declaration group.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
// Direct iteration over the declarations themselves (not their children).
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
/// Build a null statement at the given ';' location. Both state fields
/// (SemiLoc and HasLeadingEmptyMacro) live in NullStmtBits.
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
/// Whether the ';' was flagged (at construction) as following an empty
/// macro expansion.
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
// The statement is just the semicolon; begin and end coincide.
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
// A null statement has no children; both ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
// The contained statements are stored as trailing Stmt* objects; the
// statement count (NumStmts) and the '{' location live in CompoundStmtBits.
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
/// Create a compound statement holding a copy of \p Stmts (defined
/// out-of-line; allocates the trailing storage from \p C).
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
// Mutable iteration over the trailing Stmt* array.
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
// Const iteration over the same trailing array.
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
// ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
for (auto *B : llvm::reverse(body())) {
if (!isa<NullStmt>(B))
return B;
}
return body_back();
}
const Stmt *getStmtExprResult() const {
return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
/// Construct a switch case of the given concrete class, recording the
/// keyword and colon locations.
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
/// Construct an empty shell (for deserialization).
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Defined out-of-line later in this header, once CaseStmt and DefaultStmt
// are complete types; dispatches on the dynamic class.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
// CaseStmt and DefaultStmt are the only concrete subclasses.
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allow ranges in cases statement of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
// Offsets (in units of Stmt*) into the trailing-object array; the RHS slot
// exists only when this is a GNU case range.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
// The LHS/RHS/substatement accessors index into the trailing Stmt* array
// using the offset helpers above.
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators: cover LHS, the optional RHS, and the substatement.
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DefaultStmt - Represents the 'default' label of a switch statement.
class DefaultStmt : public SwitchCase {
/// The statement following 'default:'.
Stmt *SubStmt;
public:
/// Build a default statement; \p DL is the 'default' keyword location and
/// \p CL the ':' location.
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators: the only child is the substatement.
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
// Defined out-of-line so that CaseStmt and DefaultStmt are complete types;
// these two are the only subclasses of SwitchCase (see its classof).
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getEndLoc();
  if (const auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
// Dispatch to the concrete subclass; CaseStmt and DefaultStmt are the only
// kinds of SwitchCase (see its classof).
Stmt *SwitchCase::getSubStmt() {
  if (auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getSubStmt();
  if (auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
public:
const Expr *getExprStmt() const;
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
/// The declaration for this label.
LabelDecl *TheDecl;
/// The statement the label is attached to.
Stmt *SubStmt;
public:
/// Build a label statement. The identifier location is stored in
/// LabelStmtBits.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
// The only child is the labeled substatement.
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public ValueStmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The statement the attributes apply to.
Stmt *SubStmt;
/// Build an attributed statement; the attributes are copied into trailing
/// storage, and their count and location live in AttributedStmtBits.
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
/// Build an empty shell with null-filled storage for NumAttrs attributes.
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: ValueStmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
// The only child is the substatement; attributes are not children.
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing
// objects at then end but this would change the order of the children.
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact a "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
Stmt *Then, SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);

/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }

/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }

/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }

/// Get the condition of this if statement. The condition is stored as a
/// trailing "Stmt *" but is in fact an "Expr *".
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}

/// Get the then-branch substatement (always present).
Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
return getTrailingObjects<Stmt *>()[thenOffset()];
}
void setThen(Stmt *Then) {
getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}

/// Get the else substatement, or nullptr if this IfStmt was created
/// without storage for one.
Stmt *getElse() {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
const Stmt *getElse() const {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
void setElse(Stmt *Else) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}

/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<IfStmt *>(this)->getConditionVariable();
}

/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}

/// Get the init statement (C++17 "if (init; cond)"), or nullptr if this
/// IfStmt was created without storage for one.
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This if statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}

/// Location of the "if" keyword, stored in the inherited stmt bit-fields.
SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

/// Location of the "else" keyword. The location is kept in a trailing
/// SourceLocation object which only exists when there is else storage;
/// otherwise an invalid (default) location is returned.
SourceLocation getElseLoc() const {
return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
void setElseLoc(SourceLocation ElseLoc) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
*getTrailingObjects<SourceLocation>() = ElseLoc;
}

/// Whether this is an 'if constexpr' statement.
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

/// If this is an 'if constexpr', determine which substatement will be taken.
/// Otherwise, or if the condition is value-dependent, returns None.
Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

bool isObjCAvailabilityCheck() const;

SourceLocation getBeginLoc() const { return getIfLoc(); }
// The statement ends at the end of the else branch if there is one,
// otherwise at the end of the then branch.
SourceLocation getEndLoc() const LLVM_READONLY {
if (getElse())
return getElse()->getEndLoc();
return getThen()->getEndLoc();
}

// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}

static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
private llvm::TrailingObjects<SwitchStmt, Stmt *> {
friend TrailingObjects;

/// Points to a linked list of case and default statements.
SwitchCase *FirstCase;

// SwitchStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more convenient to
// put the optional trailing objects at the end but this would change
// the order in children().
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.

// Base offsets into the trailing "Stmt *" array; the optional objects
// shift the later (mandatory) ones, hence the offset helpers below.
enum { InitOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };

// Total trailing "Stmt *"s: cond + body plus the optional init/cond-var.
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
}

unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

/// Build a switch statement.
SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

/// Build a empty switch statement.
explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
/// Create a switch statement.
static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
Expr *Cond);

/// Create an empty switch statement optionally with storage for
/// an init expression and a condition variable.
static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
bool HasVar);

/// True if this SwitchStmt has storage for an init statement.
bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

/// True if this SwitchStmt has storage for a condition variable.
bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

/// Get the condition of this switch. Stored as a "Stmt *" but in fact
/// an "Expr *".
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}

/// Get the body of this switch (always has storage).
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}

/// Get the init statement, or nullptr if this SwitchStmt was created
/// without storage for one.
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This switch statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}

/// Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<SwitchStmt *>(this)->getConditionVariable();
}

/// Set the condition variable in this switch statement.
/// The switch statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}

/// Head of the intrusive singly-linked list of case/default statements.
SwitchCase *getSwitchCaseList() { return FirstCase; }
const SwitchCase *getSwitchCaseList() const { return FirstCase; }
void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

/// Set the body and the location of the "switch" keyword in one call.
void setBody(Stmt *S, SourceLocation SL) {
setBody(S);
setSwitchLoc(SL);
}

/// Prepend a case/default statement to the case list. The new case must
/// not already be linked into another switch.
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase() &&
"case/default already added to a switch");
SC->setNextSwitchCase(FirstCase);
FirstCase = SC;
}

/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const {
return SwitchStmtBits.AllEnumCasesCovered;
}

SourceLocation getBeginLoc() const { return getSwitchLoc(); }
// If there is no body yet (e.g. mid-parse), fall back to the condition.
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody() ? getBody()->getEndLoc()
: reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
}

// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}

static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
private llvm::TrailingObjects<WhileStmt, Stmt *> {
friend TrailingObjects;

// WhileStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more
// convenient to put the optional trailing object at the end
// but this would affect children().
// The trailing objects are in order:
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.
//

// Base offsets into the trailing "Stmt *" array; the optional condition
// variable shifts the mandatory objects, hence the offset helpers below.
enum { VarOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };

unsigned varOffset() const { return VarOffset; }
unsigned condOffset() const { return VarOffset + hasVarStorage(); }
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

// Total trailing "Stmt *"s: cond + body plus the optional cond-var.
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasVarStorage();
}

/// Build a while statement.
WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
SourceLocation WL);

/// Build an empty while statement.
explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
/// Create a while statement.
static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
Stmt *Body, SourceLocation WL);

/// Create an empty while statement optionally with storage for
/// a condition variable.
static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

/// True if this WhileStmt has storage for a condition variable.
bool hasVarStorage() const { return WhileStmtBits.HasVar; }

/// Get the condition of this while loop. Stored as a "Stmt *" but in
/// fact an "Expr *".
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}

/// Get the body of this while loop (always has storage).
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}

/// Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<WhileStmt *>(this)->getConditionVariable();
}

/// Set the condition variable of this while statement.
/// The while statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}

SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

SourceLocation getBeginLoc() const { return getWhileLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody()->getEndLoc();
}

static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}

// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
// Fixed layout of substatements: loop body first, then the condition.
enum { BODY, COND, END_EXPR };
Stmt *SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
/// Build a do-while statement.
/// \param DL location of the 'do' keyword (kept in the Stmt bit-fields).
/// \param WL location of the 'while' keyword.
/// \param RP location of the ')' closing the condition.
DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
setCond(Cond);
setBody(Body);
setDoLoc(DL);
}

/// Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

// The condition is stored as a "Stmt *" but is in fact an "Expr *".
Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
const Expr *getCond() const {
return reinterpret_cast<Expr *>(SubExprs[COND]);
}
void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }

// A do-while begins at the 'do' keyword and ends at the ')' that closes
// the condition.
SourceLocation getBeginLoc() const { return getDoLoc(); }
SourceLocation getEndLoc() const { return getRParenLoc(); }

static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}

// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
// Fixed layout of substatements. CONDVAR holds the faux DeclStmt for the
// condition variable (see getConditionVariableDeclStmt).
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;

public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);

/// Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

Stmt *getInit() { return SubExprs[INIT]; }

/// Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);

/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}

// The cond/inc slots are stored as "Stmt *" but are in fact "Expr *";
// any of them may be null (see class comment).
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }

const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }

void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }

SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }

SourceLocation getBeginLoc() const { return getForLoc(); }
// NOTE(review): assumes the body is always non-null, unlike init/cond/inc
// — confirm against the parser's invariants.
SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}

// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
LabelDecl *Label;     // The label being jumped to.
SourceLocation LabelLoc;

public:
/// \param GL location of the 'goto' keyword (kept in the Stmt bit-fields).
/// \param LL location of the label name.
GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
: Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
setGotoLoc(GL);
}

/// Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

LabelDecl *getLabel() const { return Label; }
void setLabel(LabelDecl *D) { Label = D; }

SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }

SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const { return getLabelLoc(); }

static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}

// Iterators: a direct goto has no substatements.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
SourceLocation StarLoc; // Location of the '*' in "goto *expr".
Stmt *Target;           // Stored as "Stmt *" but in fact an "Expr *".

public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
: Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
setTarget(target);
setGotoLoc(gotoLoc);
}

/// Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) {}

void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }

/// The expression whose value is the jump target.
Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
const Expr *getTarget() const {
return reinterpret_cast<const Expr *>(Target);
}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
}

SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}

// Iterators: the only child is the target expression.
child_range children() { return child_range(&Target, &Target + 1); }
const_child_range children() const {
return const_child_range(&Target, &Target + 1);
}
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
/// \param CL location of the 'continue' keyword (kept in the Stmt
/// bit-fields).
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
setContinueLoc(CL);
}

/// Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

// The statement consists solely of the keyword itself.
SourceLocation getBeginLoc() const { return getContinueLoc(); }
SourceLocation getEndLoc() const { return getContinueLoc(); }

static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}

// Iterators: no substatements.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
/// \param BL location of the 'break' keyword (kept in the Stmt bit-fields).
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
setBreakLoc(BL);
}

/// Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

// The statement consists solely of the keyword itself.
SourceLocation getBeginLoc() const { return getBreakLoc(); }
SourceLocation getEndLoc() const { return getBreakLoc(); }

static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}

// Iterators: no substatements.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
: public Stmt,
private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
friend TrailingObjects;

/// The return expression. May be null for a bare "return;".
Stmt *RetExpr;

// ReturnStmt is followed optionally by a trailing "const VarDecl *"
// for the NRVO candidate. Present if and only if hasNRVOCandidate().

/// True if this ReturnStmt has storage for an NRVO candidate.
bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
return hasNRVOCandidate();
}

/// Build a return statement.
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

/// Build an empty return statement.
explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
/// Create a return statement.
static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
const VarDecl *NRVOCandidate);

/// Create an empty return statement, optionally with
/// storage for an NRVO candidate.
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

/// The returned expression, or null for "return;". Stored as a "Stmt *"
/// but in fact an "Expr *".
Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

/// Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const {
return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
: nullptr;
}

/// Set the variable that might be used for the named return value
/// optimization. The return statement must have storage for it,
/// which is the case if and only if hasNRVOCandidate() is true.
void setNRVOCandidate(const VarDecl *Var) {
assert(hasNRVOCandidate() &&
"This return statement has no storage for an NRVO candidate!");
*getTrailingObjects<const VarDecl *>() = Var;
}

SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

SourceLocation getBeginLoc() const { return getReturnLoc(); }
// A bare "return;" ends at the keyword; otherwise at the end of the value.
SourceLocation getEndLoc() const LLVM_READONLY {
return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
}

static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}

// Iterators: the return value is the only (optional) child.
child_range children() {
if (RetExpr)
return child_range(&RetExpr, &RetExpr + 1);
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
if (RetExpr)
return const_child_range(&RetExpr, &RetExpr + 1);
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;

SourceLocation AsmLoc;

/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;

/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;

unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;

// Operand expressions, outputs first then inputs (derived classes may
// append more, e.g. GCCAsmStmt's goto labels). Owned by the derived class.
Stmt **Exprs = nullptr;

AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}

public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }

bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }

bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }

// NOTE(review): the base class returns invalid (default) locations here;
// derived classes (e.g. GCCAsmStmt) shadow these with real locations.
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

//===--- Asm String Analysis ---===//

/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;

//===--- Output operands ---===//

unsigned getNumOutputs() const { return NumOutputs; }

/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;

/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}

const Expr *getOutputExpr(unsigned i) const;

/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;

//===--- Input operands ---===//

unsigned getNumInputs() const { return NumInputs; }

/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;

const Expr *getInputExpr(unsigned i) const;

//===--- Other ---===//

unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;

static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}

// Input expr iterators. Inputs live in Exprs[NumOutputs .. NumOutputs+NumInputs).
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}

// Output expr iterators. Outputs live in Exprs[0 .. NumOutputs).
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}

const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}

// children() covers outputs then inputs (not clobbers, which are not Exprs).
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
/// Return the identifier naming input operand \p i, or null if unnamed.
/// Inputs are stored after the outputs, hence the NumOutputs offset.
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
/// Return the symbolic name of input operand \p i, or an empty StringRef
/// if the operand has no name.
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
/// Inputs follow the outputs in Constraints, hence the NumOutputs offset.
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
/// Const overload; forwards to the non-const accessor.
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
/// An "asm goto" is a GCC asm statement carrying one or more target labels.
bool isAsmGoto() const {
return NumLabels > 0;
}
unsigned getNumLabels() const {
return NumLabels;
}
/// Labels are stored after the outputs and inputs in Names.
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
// Label expressions occupy the tail of Exprs, after outputs and inputs.
labels_iterator begin_labels() {
return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
/// Install the operand arrays; used by ASTStmtReader and the constructor.
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
// Unlike GCCAsmStmt, the asm string and constraints are plain StringRefs
// (the MS syntax has no StringLiteral AST nodes for them).
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
/// True when the statement was written in brace-delimited form; an invalid
/// LBraceLoc means no braces were present.
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
/// Outputs occupy the front of Constraints.
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
/// Inputs follow the outputs in Constraints, hence the NumOutputs offset.
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
/// All constraints, outputs first then inputs.
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
/// All operand expressions, outputs first then inputs.
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
/// Copy the token/constraint/expr/clobber arrays into ASTContext storage.
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
/// Represents a Windows Structured Exception Handling __except handler:
/// the filter expression plus the handler block.
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
// Children[FILTER_EXPR] is the filter expression, Children[BLOCK] the
// handler compound statement.
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
/// Represents a Windows Structured Exception Handling __finally block.
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
/// Represents a __try statement together with its handler, which is either
/// a __except or a __finally block.
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
// Children[TRY] is the guarded block, Children[HANDLER] the handler.
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
// A __leave statement occupies a single token, so begin and end coincide.
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators (no children).
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
// The captured VarDecl (null when capturing 'this') packed with the
// capture kind in the pointer's low bits.
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variable captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the implicit the outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
// Trailing storage: NumCaptures capture-init expressions followed by the
// captured statement itself, laid out directly after this object.
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
// Source range is delegated entirely to the captured statement.
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
/* dijkstra_100_OMP.c
Code owned by Geeks for Geeks
Source: https://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-greedy-algo-7/
*/
#include <omp.h>
#include <limits.h>
#include <stdio.h>
#include <stdbool.h>
#include "timer.h"
// Number of vertices in the graph
#define V 100
// A utility function to find the vertex with minimum distance value, from
// the set of vertices not yet included in shortest path tree.
// Returns -1 if every vertex is already finalized.
int minDistance(int dist[], bool sptSet[])
{
    /* NOTE(fix): the original loop ran under "#pragma omp parallel for",
     * which races on the shared `min` and `min_index` variables
     * (unsynchronized read-modify-write) and can return a vertex that is
     * not the minimum. For V == 100 the loop is far too small for the
     * parallel overhead to pay off, so run it serially and correctly. */
    int min = INT_MAX;
    int min_index = -1; // was uninitialized in the original

    for (int v = 0; v < V; v++) {
        if (!sptSet[v] && dist[v] <= min) {
            min = dist[v];
            min_index = v;
        }
    }
    return min_index;
}
// A utility function to print the constructed distance array, one line per
// vertex, in vertex order.
void printSolution(int dist[])
{
    printf("Vertex \t\t Distance from Source\n");
    /* NOTE(fix): the original loop ran under "#pragma omp parallel for",
     * which interleaves the printf calls and prints the vertices in a
     * nondeterministic order. Output must be serial. */
    for (int i = 0; i < V; i++)
        printf("%d \t\t %d\n", i, dist[i]);
}
// Function that implements Dijkstra's single source shortest path algorithm
// for a graph represented using adjacency matrix representation.
// Distances from `src` are computed into a local array and printed.
void dijkstra(int graph[V][V], int src)
{
    int dist[V];    // dist[i] will hold the shortest distance from src to i
    bool sptSet[V]; // true once vertex i's shortest distance is finalized

    /* Initialization is embarrassingly parallel. (The original nested a
     * "#pragma omp parallel" directly above a "#pragma omp parallel for",
     * which creates a team per outer thread and redundantly re-runs the
     * whole initialization in each.) */
    #pragma omp parallel for
    for (int i = 0; i < V; i++) {
        dist[i] = INT_MAX;
        sptSet[i] = false;
    }

    // Distance of source vertex from itself is always 0
    dist[src] = 0;

    /* NOTE(fix): the main loop is inherently sequential -- iteration k
     * depends on the vertices finalized by iterations 0..k-1 -- so the
     * original "#pragma omp parallel for" on it produced racy, incorrect
     * distances. Only the inner relaxation step is safe to parallelize:
     * each v writes exclusively to dist[v]. */
    for (int count = 0; count < V - 1; count++) {
        // Pick the minimum distance vertex not yet processed.
        int u = minDistance(dist, sptSet);

        // Mark the picked vertex as processed.
        sptSet[u] = true;

        // Relax edge (u, v) when v is unfinalized, the edge exists, and
        // the path through u improves the current dist[v].
        #pragma omp parallel for
        for (int v = 0; v < V; v++) {
            if (!sptSet[v] && graph[u][v] && dist[u] != INT_MAX
                && dist[u] + graph[u][v] < dist[v])
                dist[v] = dist[u] + graph[u][v];
        }
    }

    // print the constructed distance array
    printSolution(dist);
}
// driver program to test above function
int main()
{
    /* Example graph (the classic 9-vertex Dijkstra sample) embedded in a
     * V x V matrix. C99 designated initializers list only the nonzero edge
     * weights; every other entry is implicitly 0 (no edge), which is exactly
     * what the original fully written-out matrix contained (one of its rows
     * was even an element short — harmless only because missing trailing
     * initializers default to 0). The edge list below is symmetric, i.e.
     * graph[a][b] == graph[b][a], as an undirected graph requires. */
    int graph[V][V] = {
        [0] = { [1] = 4,  [7] = 8 },
        [1] = { [0] = 4,  [2] = 8,  [7] = 11 },
        [2] = { [1] = 8,  [3] = 7,  [5] = 4,  [8] = 2 },
        [3] = { [2] = 7,  [4] = 9,  [5] = 14 },
        [4] = { [3] = 9,  [5] = 10 },
        [5] = { [2] = 4,  [3] = 14, [4] = 10, [6] = 2 },
        [6] = { [5] = 2,  [7] = 1,  [8] = 6 },
        [7] = { [0] = 8,  [1] = 11, [6] = 1,  [8] = 7 },
        [8] = { [2] = 2,  [6] = 6,  [7] = 7 },
    };
    StartTimer();
    dijkstra(graph, 0); // shortest paths from vertex 0
    // NOTE(review): runtime is divided by 1000 before printing as seconds,
    // so GetTimer() presumably reports milliseconds — confirm against the
    // timer helper's definition.
    double runtime = GetTimer();
    printf(" total: %f s\n", runtime / 1000);
    return 0;
}
|
FileOperations.c | // Axel Zuchovicki A01022875
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <math.h>
#include "FileOperations.h"
#include "tools.h"
/**
 * Splits `buffer` (file_length bytes) bit-by-bit, round-robin, into
 * `server_amount` parts stored in *all_parts (allocated here; caller frees).
 * The first two parts additionally receive a copy of the parity buffer.
 */
void divide_buffer(unsigned char *buffer, unsigned char *parity, file_part **all_parts, int server_amount,
                   size_t file_length) {
    *all_parts = malloc(server_amount * sizeof(file_part));
    // The size for the parity file is the file length divided by the amount of servers.
    // We must apply the ceiling to make sure we catch all the information and not lose any bits.
    size_t parity_size = (size_t) ceil((double) file_length / server_amount);
    // Initialize all parts.
    for (int i = 0; i < server_amount; ++i) {
        (*all_parts)[i].buffer = calloc(parity_size, sizeof(unsigned char));
        // Bits per part: total bits divided evenly, plus one extra bit for the
        // first (file_length * 8) % server_amount parts.
        // Fix: the remainder must be taken on the BIT count, not the byte count;
        // the old `file_length % server_amount` mis-counted bit_amount for some
        // part counts, making merge_parts drop a part's last bit.
        (*all_parts)[i].bit_amount = (file_length * 8) / server_amount
                                     + (i < (file_length * 8) % server_amount);
        // Only the first two servers receive a copy of the parity file.
        if (i < 2) {
            (*all_parts)[i].parity_size = parity_size;
            (*all_parts)[i].parity_file = malloc(parity_size * sizeof(unsigned char));
            // memcpy, not strncpy: parity is raw binary data and may contain
            // embedded zero bytes, at which strncpy would stop copying.
            memcpy((*all_parts)[i].parity_file, parity, parity_size);
        // If it isn't, then we just set the size to 0 and don't initialize it.
        } else {
            (*all_parts)[i].parity_size = 0;
            (*all_parts)[i].parity_file = NULL;
        }
    }
#pragma omp parallel for default(none) shared(all_parts, server_amount, file_length, buffer) if(file_length > MIN_PARALLEL_LEN)
    // Iterates for each bit.
    for (unsigned long i = 0; i < file_length * 8; ++i) {
        // Skip bits beyond this part's bit count (>=, consistent with merge_parts;
        // the old `>` processed one bit index too many).
        if (i / server_amount >= (*all_parts)[i % server_amount].bit_amount) {
            continue;
        }
        // Value of source bit i (bit 0 is the MSB of buffer[0]).
        bool current_val = (bool) (buffer[i / 8] & (1 << (7 - i % 8)));
        // Left-shift position of this bit inside the destination byte.
        int shift_amount = 7 - (int) ((i / server_amount) % 8);
        // Set the bit in the correct part. Eight consecutive bits of a part
        // share one destination byte, so threads handling adjacent iterations
        // may OR into the same byte — the update must be atomic.
        #pragma omp atomic
        (*all_parts)[i % server_amount].buffer[i / (server_amount * 8)] |= current_val << shift_amount;
    }
}
/*
 * Reassembles the original file from its distributed parts: bit i of the
 * output is pulled from part i % server_amount.  `buffer` must be
 * zero-initialized by the caller, since bits are OR-ed in, never cleared.
 */
void merge_parts(file_part *all_parts, int server_amount, unsigned char *buffer, size_t file_length) {
#pragma omp parallel for default(none) shared(all_parts, server_amount, file_length, buffer) if(file_length > MIN_PARALLEL_LEN)
    // Walk every bit position of the original file.
    for (unsigned long bit = 0; bit < file_length * 8; ++bit) {
        int owner = (int) (bit % server_amount);                 // part holding this bit
        unsigned long pos_in_part = bit / server_amount;         // bit index inside that part
        // Skip positions past the end of the owning part.
        if (pos_in_part >= all_parts[owner].bit_amount) {
            continue;
        }
        // Extract the stored bit from the owner's buffer.
        bool is_set = (bool) (all_parts[owner].buffer[pos_in_part / 8] &
                              (1 << (7 - pos_in_part % 8)));
        // Drop it back into its original position in the output buffer.
        buffer[bit / 8] |= is_set << (7 - (int) (bit % 8));
    }
}
/*
 * Reads the whole of `filename` into a freshly allocated *buffer and stores
 * its size in *file_length.  Returns 0 on success, FILE_OPEN_ERROR when the
 * file cannot be opened, and -1 on any other failure (size query, allocation,
 * or short read).  Callers should treat any non-zero value as an error; the
 * caller owns and frees *buffer on success.
 */
int read_file(char *filename, unsigned char **buffer, size_t *file_length) {
    FILE *file = fopen(filename, "rb");
    if (file == NULL) {
        return FILE_OPEN_ERROR;
    }
    // Get the file size by seeking to the end and reading the position.
    if (fseek(file, 0, SEEK_END) != 0) {
        fclose(file);
        return -1;
    }
    long size = ftell(file);
    if (size < 0) {  // ftell reports failure as -1
        fclose(file);
        return -1;
    }
    *file_length = (size_t) size;
    // Go back to the beginning.
    rewind(file);
    // Initialize the buffer and check the allocation before using it.
    *buffer = malloc((*file_length) * sizeof(unsigned char));
    if (*buffer == NULL) {
        fclose(file);
        return -1;
    }
    // Read the bits into the buffer; a short read means the data is unusable.
    if (*file_length > 0 && fread(*buffer, *file_length, 1, file) != 1) {
        free(*buffer);
        *buffer = NULL;
        fclose(file);
        return -1;
    }
    fclose(file);
    return 0;
}
/*
 * Builds the parity buffer for `buffer`: parity bit p is the XOR of the
 * up-to-`server_amount` data bits starting at data bit p * server_amount.
 * *parity_file is allocated here (ceil(file_length / server_amount) bytes);
 * the caller owns and frees it.
 *
 * NOTE(review): `file_length` appears in the if() clause of the pragma but is
 * not in the shared() list despite default(none) -- some compilers reject
 * this; verify against the build configuration.
 */
void get_parity(unsigned char *buffer, int server_amount, size_t file_length, unsigned char **parity_file) {
    // Initialize the parity file.
    *parity_file = calloc((size_t) ceil((double) file_length / server_amount), sizeof(unsigned char));
    size_t bit_amount = file_length * 8;
#pragma omp parallel for default(none) shared(buffer, server_amount, parity_file, bit_amount) if(file_length > MIN_PARALLEL_LEN)
    // Iterate through all the bits.
    for (unsigned long i = 0; i < bit_amount; i += server_amount) {
        // Start the bit in false.
        bool current_value = false;
        // Iterate through all the servers.
        for (int j = 0; j < server_amount; j++) {
            // If the bit corresponding to that server is set, then change the parity value.
            if (i + j >= bit_amount) {
                break;
            }
            if (buffer[(i + j) / 8] & (1 << (7 - (i + j) % 8))) {
                current_value = !current_value;
            }
        }
        // Calculate the shift for the bit in the parity file.
        int shift_value = 7 - (int) ((i / server_amount) % 8);
        // Set the bit.
        (*parity_file)[i / (8 * server_amount)] |= current_value << shift_value;
    }
    bool current_value = false;
    // Iterate through the last bits that might not have completed the loop, not setting the value.
    // NOTE(review): `file_length % server_amount` counts leftover BYTES but is
    // subtracted from a BIT count here -- TODO confirm the tail-range arithmetic.
    for (unsigned long i = bit_amount - file_length % server_amount; i < bit_amount; i++) {
        if (buffer[i / 8] & (1 << (7 - i % 8))) {
            current_value = !current_value;
        }
    }
    // If we need to set the final bit for the parity file.
    if (file_length % server_amount != server_amount - 1) {
        // Set the bit the same way that we did in the loop.
        // NOTE(review): the (int) cast truncates file_length * 8 and would
        // overflow for files of ~256 MB or more -- verify the expected size range.
        int final_shift = 7 - (int) (file_length * 8) / server_amount % 8;
        (*parity_file)[file_length / server_amount] |= current_value << final_shift;
    }
}
// Simulates losing a server: wipes the part's data buffer (and its parity
// copy, when present) and marks the part as holding zero bits.  Buffers stay
// allocated; only their contents and bookkeeping are cleared.
void loose_bits(file_part *part_to_loose) {
    size_t data_bytes = (size_t) ceil((double) part_to_loose->bit_amount / 8);
    // Zero out the data buffer.
    memset(part_to_loose->buffer, 0, data_bytes);
    // Wipe the parity copy when this part carries one.
    if (part_to_loose->parity_size > 0) {
        memset(part_to_loose->parity_file, 0, (size_t) part_to_loose->parity_size);
        part_to_loose->parity_size = 0;
    }
    part_to_loose->bit_amount = 0;
}
/*
 * Rebuilds a lost part from the parity buffer and the surviving parts.  Each
 * recovered bit is chosen so the XOR of all parts' bits at that position
 * matches the corresponding parity bit.
 */
void recover_part(file_part *all_parts, int server_amount, int part_to_recover, unsigned char *parity_file) {
    // If we lost the first part, set the reference for the parity file to the second.
    int reference = part_to_recover == 0 ? 1 : 0;
    // An extra bit would ensure that we never loose information. If that extra bit is a 0, then it doesn't affect the file.
    all_parts[part_to_recover].bit_amount = all_parts[reference].bit_amount + 1;
    // BUGFIX: release the old buffer before reallocating -- loose_bits() only
    // zeroes it, so overwriting the pointer here leaked the original allocation.
    free(all_parts[part_to_recover].buffer);
    // Re-init the buffer for the lost part.
    all_parts[part_to_recover].buffer = calloc((size_t) ceil((double) all_parts[part_to_recover].bit_amount / 8),
                                               sizeof(unsigned char));
    // Iterate till an extra bit for the special case.
#pragma omp parallel for default(none) shared(parity_file, all_parts, server_amount, part_to_recover, reference) if(all_parts[reference].bit_amount/8 > MIN_PARALLEL_LEN)
    for (unsigned long i = 0; i <= all_parts[reference].bit_amount; i++) {
        // Start with an unset bit.
        bool current_value = false;
        for (int j = 0; j < server_amount; j++) {
            // Skip parts too short for this position and the part being rebuilt.
            if (all_parts[j].bit_amount <= i || j == part_to_recover) continue;
            // Keep track of the set bits in current value.
            if (all_parts[j].buffer[i / 8] & (1 << (7 - i % 8))) {
                current_value = !current_value;
            }
        }
        // Get the corresponding bit in the parity file.
        bool current_parity = (bool) (parity_file[i / 8] & 1 << (7 - i % 8));
        // If the bit calculated is different than the one in the parity file
        if (current_parity != current_value && all_parts[part_to_recover].bit_amount > i) {
            // We need to set the bit in the part that was lost.
            all_parts[part_to_recover].buffer[i / 8] |= 1 << (7 - i % 8);
        }
    }
}
// Dumps the part's whole bytes to stdout as space-separated decimal values,
// followed by a newline.  Any trailing partial byte is not printed.
void print_descriptive_buffer(file_part *part) {
    unsigned long whole_bytes = part->bit_amount / 8;
    for (unsigned long idx = 0; idx < whole_bytes; ++idx) {
        // Cast the byte to an int to see easily its value.
        printf("%d ", (int) part->buffer[idx]);
    }
    printf("\n");
}
// Frees the memory allocated by the sent part.
// Frees the memory owned by the sent part.  free(NULL) is a no-op, so the
// former NULL guards were redundant (the parity file may legitimately be
// unset).  Pointers are reset to NULL so a later double free cannot occur.
void free_part(file_part *part) {
    free(part->buffer);
    part->buffer = NULL;
    free(part->parity_file);
    part->parity_file = NULL;
}
// Releases every part's internal buffers, then the parts array itself.
void free_parts(file_part **parts, int server_amount) {
    file_part *arr = *parts;
    // Each part owns heap buffers that must go before the array does.
    for (int idx = 0; idx < server_amount; ++idx) {
        free_part(&arr[idx]);
    }
    free(arr);
}
/*
 * Writes `file_length` bytes from `buffer` to `filename`.  Returns 0 on
 * success, FILE_OPEN_ERROR if the file cannot be opened, and -1 when the
 * write (or the flush performed by fclose) fails.  Callers should treat any
 * non-zero value as an error.
 */
int write_file(char *filename, unsigned char *buffer, size_t file_length) {
    FILE *file = fopen(filename, "wb");
    if (file == NULL) {
        return FILE_OPEN_ERROR;
    }
    // fwrite may write fewer items than requested; treat that as an error.
    if (fwrite(buffer, sizeof(unsigned char), file_length, file) != file_length) {
        fclose(file);
        return -1;
    }
    // fclose flushes buffered data; a failure here means the file is incomplete.
    if (fclose(file) != 0) {
        return -1;
    }
    return 0;
}
// Maps a read_file() error code to a human-readable message on stdout.
void handle_reading_error(int error, char *filename) {
    if (error == FILE_OPEN_ERROR) {
        printf("Can't open file %s\n", filename);
        return;
    }
    // Any other non-zero code falls through to a generic message.
    printf("Unknown reading error for file %s\n", filename);
}
|
omp_hello.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * OpenMP hello-world: every thread prints its ID; the master thread also
 * reports the team size.  BUGFIX: the original declared `tidl` but the
 * pragma and body used `tid`, which did not compile.
 */
int main (int argc, char *argv[])
{
  int nthreads, tid;
  // fork a team of threads giving them their own copies of variables
  #pragma omp parallel private(nthreads, tid)
  {
    // obtain thread number
    tid = omp_get_thread_num();
    printf("Hello World from thread = %d\n", tid);
    // only the master thread queries and reports the team size
    if (tid == 0)
    {
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }
  } // all threads join master thread and disband
  return 0;
}
|
ark_heat1D_ompdev.c | /*---------------------------------------------------------------
* Programmer(s): Shelby Lockhart @ LLNL
*---------------------------------------------------------------
* Based on the serial example ark_heat1D.c developed by
* Daniel R. Reynolds and parallelized with OpenMP 4.5.
*---------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
*---------------------------------------------------------------
* Example problem:
*
* The following test simulates a simple 1D heat equation,
* u_t = k*u_xx + f
* for t in [0, 10], x in [0, 1], with initial conditions
* u(0,x) = 0
* Dirichlet boundary conditions, i.e.
* u_t(t,0) = u_t(t,1) = 0,
* and a point-source heating term,
* f = 1 for x=0.5.
*
* The spatial derivatives are computed using second-order
* centered differences, with the data distributed over N points
* on a uniform spatial grid.
*
* This program solves the problem with either an ERK or DIRK
* method. For the DIRK method, we use a Newton iteration with
* the SUNLinSol_PCG linear solver, and a user-supplied Jacobian-vector
* product routine.
*
* 100 outputs are printed at equal intervals, and run statistics
* are printed at the end.
*---------------------------------------------------------------*/
/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts. */
#include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */
#include <sunlinsol/sunlinsol_pcg.h> /* access to PCG SUNLinearSolver */
#include <sundials/sundials_types.h> /* defs. of realtype, sunindextype, etc */
#include <sundials/sundials_math.h> /* def. of SUNRsqrt, etc. */
#ifdef _OPENMP
#include <omp.h> /* OpenMP functions */
#endif
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif
/* user data structure */
typedef struct {
sunindextype N; /* number of intervals */
realtype dx; /* mesh spacing */
realtype k; /* diffusion coefficient */
} *UserData;
/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
N_Vector fy, void *user_data, N_Vector tmp);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/* Main Program */
/* Main Program: sets up the 1D heat problem, integrates with ARKStep + PCG,
   writes mesh/solution files, and prints run statistics.
   BUGFIX: error messages passed to check_flag now name the functions actually
   called (N_VNew_OpenMPDEV, ARKStepEvolve) instead of stale copy-paste names. */
int main() {
  /* general problem parameters */
  realtype T0 = RCONST(0.0);   /* initial time */
  realtype Tf = RCONST(1.0);   /* final time */
  int Nt = 10;                 /* total number of output times */
  realtype rtol = 1.e-6;       /* relative tolerance */
  realtype atol = 1.e-10;      /* absolute tolerance */
  UserData udata = NULL;
  realtype *data;
  sunindextype N = 201;        /* spatial mesh size */
  realtype k = 0.5;            /* heat conductivity */
  sunindextype i;
  /* general problem variables */
  int flag;                    /* reusable error-checking flag */
  N_Vector y = NULL;           /* empty vector for storing solution */
  SUNLinearSolver LS = NULL;   /* empty linear solver object */
  void *arkode_mem = NULL;     /* empty ARKStep memory structure */
  FILE *FID, *UFID;
  realtype t, dTout, tout;
  int iout;
  long int nst, nst_a, nfe, nfi, nsetups, nli, nJv, nlcf, nni, ncfn, netf;
  /* Create the SUNDIALS context object for this simulation */
  SUNContext ctx;
  flag = SUNContext_Create(NULL, &ctx);
  if (check_flag(&flag, "SUNContext_Create", 1)) return 1;
  /* allocate and fill udata structure */
  udata = (UserData) malloc(sizeof(*udata));
  udata->N = N;
  udata->k = k;
  udata->dx = RCONST(1.0)/(1.0*N-1.0);   /* mesh spacing */
  /* Initial problem output */
  printf("\n1D Heat PDE test problem:\n");
  printf(" N = %li\n", (long int) udata->N);
  printf(" diffusion coefficient: k = %"GSYM"\n", udata->k);
  /* Initialize data structures */
  y = N_VNew_OpenMPDEV(N, ctx);   /* Create OpenMPDEV vector for solution */
  if (check_flag((void *) y, "N_VNew_OpenMPDEV", 0)) return 1;  /* was "N_VNew_Serial" */
  N_VConst(0.0, y);               /* Set initial conditions */
  /* Call ARKStepCreate to initialize the integrator memory and specify the
     right-hand side function in y'=f(t,y), the inital time T0, and
     the initial dependent variable vector y.  Note: since this
     problem is fully implicit, we set f_E to NULL and f_I to f. */
  arkode_mem = ARKStepCreate(NULL, f, T0, y, ctx);
  if (check_flag((void *) arkode_mem, "ARKStepCreate", 0)) return 1;
  /* Set routines */
  flag = ARKStepSetUserData(arkode_mem, (void *) udata);   /* Pass udata to user functions */
  if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
  flag = ARKStepSetMaxNumSteps(arkode_mem, 10000);         /* Increase max num steps */
  if (check_flag(&flag, "ARKStepSetMaxNumSteps", 1)) return 1;
  flag = ARKStepSetPredictorMethod(arkode_mem, 1);         /* Specify maximum-order predictor */
  if (check_flag(&flag, "ARKStepSetPredictorMethod", 1)) return 1;
  flag = ARKStepSStolerances(arkode_mem, rtol, atol);      /* Specify tolerances */
  if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;
  /* Initialize PCG solver -- no preconditioning, with up to N iterations */
  LS = SUNLinSol_PCG(y, 0, N, ctx);
  if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1;
  /* Linear solver interface -- set user-supplied J*v routine (no 'jtsetup' required) */
  flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL);     /* Attach linear solver to ARKStep */
  if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
  flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac);        /* Set the Jacobian routine */
  if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1;
  /* Specify linearly implicit RHS, with non-time-dependent Jacobian */
  flag = ARKStepSetLinear(arkode_mem, 0);
  if (check_flag(&flag, "ARKStepSetLinear", 1)) return 1;
  /* output mesh to disk */
  FID=fopen("heat_mesh.txt","w");
  for (i=0; i<N; i++)  fprintf(FID," %.16"ESYM"\n", udata->dx*i);
  fclose(FID);
  /* Open output stream for results, access data array */
  UFID=fopen("heat1D.txt","w");
  data = N_VGetHostArrayPointer_OpenMPDEV(y);
  N_VCopyFromDevice_OpenMPDEV(y);   /* always copy back from device before printing */
  /* output initial condition to disk */
  for (i=0; i<N; i++)  fprintf(UFID," %.16"ESYM"", data[i]);
  fprintf(UFID,"\n");
  /* Main time-stepping loop: calls ARKStepEvolve to perform the integration, then
     prints results.  Stops when the final time has been reached */
  t = T0;
  dTout = (Tf-T0)/Nt;
  tout = T0+dTout;
  printf(" t ||u||_rms\n");
  printf(" -------------------------\n");
  printf(" %10.6"FSYM" %10.6"FSYM"\n", t, SUNRsqrt(N_VDotProd(y,y)/N));
  for (iout=0; iout<Nt; iout++) {
    flag = ARKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL);   /* call integrator */
    if (check_flag(&flag, "ARKStepEvolve", 1)) break;   /* was misnamed "ARKStep" */
    printf(" %10.6"FSYM" %10.6"FSYM"\n", t, SUNRsqrt(N_VDotProd(y,y)/N));   /* print solution stats */
    if (flag >= 0) {   /* successful solve: update output time */
      tout += dTout;
      tout = (tout > Tf) ? Tf : tout;
    } else {           /* unsuccessful solve: break */
      fprintf(stderr,"Solver failure, stopping integration\n");
      break;
    }
    N_VCopyFromDevice_OpenMPDEV(y);   /* copy back from device before printing solution */
    /* output results to disk */
    for (i=0; i<N; i++)  fprintf(UFID," %.16"ESYM"", data[i]);
    fprintf(UFID,"\n");
  }
  printf(" -------------------------\n");
  fclose(UFID);
  /* Print some final statistics */
  flag = ARKStepGetNumSteps(arkode_mem, &nst);
  check_flag(&flag, "ARKStepGetNumSteps", 1);
  flag = ARKStepGetNumStepAttempts(arkode_mem, &nst_a);
  check_flag(&flag, "ARKStepGetNumStepAttempts", 1);
  flag = ARKStepGetNumRhsEvals(arkode_mem, &nfe, &nfi);
  check_flag(&flag, "ARKStepGetNumRhsEvals", 1);
  flag = ARKStepGetNumLinSolvSetups(arkode_mem, &nsetups);
  check_flag(&flag, "ARKStepGetNumLinSolvSetups", 1);
  flag = ARKStepGetNumErrTestFails(arkode_mem, &netf);
  check_flag(&flag, "ARKStepGetNumErrTestFails", 1);
  flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
  check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1);
  flag = ARKStepGetNumNonlinSolvConvFails(arkode_mem, &ncfn);
  check_flag(&flag, "ARKStepGetNumNonlinSolvConvFails", 1);
  flag = ARKStepGetNumLinIters(arkode_mem, &nli);
  check_flag(&flag, "ARKStepGetNumLinIters", 1);
  flag = ARKStepGetNumJtimesEvals(arkode_mem, &nJv);
  check_flag(&flag, "ARKStepGetNumJtimesEvals", 1);
  flag = ARKStepGetNumLinConvFails(arkode_mem, &nlcf);
  check_flag(&flag, "ARKStepGetNumLinConvFails", 1);
  printf("\nFinal Solver Statistics:\n");
  printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
  printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi);
  printf(" Total linear solver setups = %li\n", nsetups);
  printf(" Total linear iterations = %li\n", nli);
  printf(" Total number of Jacobian-vector products = %li\n", nJv);
  printf(" Total number of linear solver convergence failures = %li\n", nlcf);
  printf(" Total number of Newton iterations = %li\n", nni);
  printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn);
  printf(" Total number of error test failures = %li\n", netf);
  /* Clean up and return with successful completion */
  N_VDestroy(y);              /* Free vectors */
  free(udata);                /* Free user data */
  ARKStepFree(&arkode_mem);   /* Free integrator memory */
  SUNLinSolFree(LS);          /* Free linear solver */
  SUNContext_Free(&ctx);      /* Free context */
  return 0;
}
/*--------------------------------
* Functions called by the solver
*--------------------------------*/
/* f routine to compute the ODE RHS function f(t,y). */
/* f routine to compute the ODE RHS function f(t,y).
   Performs the interior centered-difference Laplacian on the device via
   OpenMP target offload; Y and Ydot are device pointers, so they are passed
   with is_device_ptr rather than mapped.  Boundary entries of ydot stay at
   the zero set by N_VConst (homogeneous Dirichlet in the time derivative).
   Returns 0 on success, 1 if a device array pointer is unavailable. */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
  UserData udata = (UserData) user_data;    /* access problem data */
  sunindextype N = udata->N;                /* set variable shortcuts */
  realtype k = udata->k;
  realtype dx = udata->dx;
  realtype *Y=NULL, *Ydot=NULL;
  realtype c1, c2;
  sunindextype i, isource;
  int dev;
  dev = omp_get_default_device();
  Y = N_VGetDeviceArrayPointer_OpenMPDEV(y);       /* access data arrays */
  if (check_flag((void *) Y, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
  Ydot = N_VGetDeviceArrayPointer_OpenMPDEV(ydot);
  if (check_flag((void *) Ydot, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
  N_VConst(0.0, ydot);                      /* Initialize ydot to zero */
  /* iterate over domain, computing all equations */
  c1 = k/dx/dx;                             /* off-diagonal stencil weight */
  c2 = -RCONST(2.0)*k/dx/dx;                /* diagonal stencil weight */
  isource = N/2;                            /* index of the point source (x = 0.5) */
  /* second-order centered difference over interior nodes, on the device */
  #pragma omp target map(to:c1,c2,isource,N,dx) is_device_ptr(Ydot,Y) device(dev)
  #pragma omp teams distribute parallel for schedule(static, 1)
  for (i=1; i<N-1; i++)
    Ydot[i] = c1*Y[i-1] + c2*Y[i] + c1*Y[i+1];
  /* single-element device update for the point source; NOTE(review): the file
     header describes f = 1 at x = 0.5, but the code adds 0.01/dx -- confirm
     the intended source magnitude. */
  #pragma omp target is_device_ptr(Ydot) device(dev)
  {
    Ydot[isource] += 0.01/dx;               /* source term */
  }
  return 0;                                 /* Return with success */
}
/* Jacobian routine to compute J(t,y) = df/dy. */
/* Jacobian routine to compute J(t,y) = df/dy.
   Applies the (constant, tridiagonal) diffusion stencil to v, producing
   Jv on the device; boundary entries of Jv remain the zero set by N_VConst.
   Returns 0 on success, 1 if a device array pointer is unavailable. */
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
               N_Vector fy, void *user_data, N_Vector tmp)
{
  UserData udata = (UserData) user_data;    /* variable shortcuts */
  sunindextype N = udata->N;
  realtype k = udata->k;
  realtype dx = udata->dx;
  realtype *V=NULL, *JV=NULL;
  realtype c1, c2;
  sunindextype i;
  int dev;
  dev = omp_get_default_device();
  V = N_VGetDeviceArrayPointer_OpenMPDEV(v);       /* access data arrays */
  if (check_flag((void *) V, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
  JV = N_VGetDeviceArrayPointer_OpenMPDEV(Jv);
  if (check_flag((void *) JV, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
  N_VConst(0.0, Jv);                        /* initialize Jv product to zero */
  /* iterate over domain, computing all Jacobian-vector products */
  c1 = k/dx/dx;                             /* off-diagonal stencil weight */
  c2 = -RCONST(2.0)*k/dx/dx;                /* diagonal stencil weight */
  /* same interior stencil as f(), applied to v on the device */
  #pragma omp target map(to:c1,c2,N) is_device_ptr(JV,V) device(dev)
  #pragma omp teams distribute parallel for schedule(static, 1)
  for (i=1; i<N-1; i++)
    JV[i] = c1*V[i-1] + c2*V[i] + c1*V[i+1];
  return 0;                                 /* Return with success */
}
/*-------------------------------
* Private helper functions
*-------------------------------*/
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer
*/
/* Check a function return value.
     opt == 0 : flagvalue is a pointer returned by an allocating SUNDIALS
                function -- NULL is an error.
     opt == 1 : flagvalue points to an int flag -- a negative value is an error.
     opt == 2 : flagvalue is memory allocated by this program -- NULL is an error.
   Prints a diagnostic to stderr and returns 1 on error; returns 0 otherwise. */
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
  /* allocating SUNDIALS call: NULL return means allocation failed */
  if (opt == 0) {
    if (flagvalue == NULL) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
    return 0;
  }
  /* call returned an int flag: negative means failure */
  if (opt == 1) {
    int rc = *((int *) flagvalue);
    if (rc < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
              funcname, rc);
      return 1;
    }
    return 0;
  }
  /* plain allocation: NULL return means out of memory */
  if (opt == 2 && flagvalue == NULL) {
    fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return 1;
  }
  return 0;
}
/*---- end of file ----*/
|
miniflux.gold.h | void miniflux_gold (
double *new_box_0_in, double *new_box_1_in, double *new_box_2_in, double *new_box_3_in, double *new_box_4_in,
double *old_box_0_in, double *old_box_1_in, double *old_box_2_in, double *old_box_3_in, double *old_box_4_in,
double *gx_0_in, double *gx_1_in, double *gx_2_in, double *gx_3_in, double *gx_4_in,
double *gy_0_in, double *gy_1_in, double *gy_2_in, double *gy_3_in, double *gy_4_in,
double *gz_0_in, double *gz_1_in, double *gz_2_in, double *gz_3_in, double *gz_4_in, int N) {
double factor1 = (1.0/12.0);
double factor2 = 2.0;
double (*new_box_0)[320][320] = (double (*)[320][320]) new_box_0_in;
double (*new_box_1)[320][320] = (double (*)[320][320]) new_box_1_in;
double (*new_box_2)[320][320] = (double (*)[320][320]) new_box_2_in;
double (*new_box_3)[320][320] = (double (*)[320][320]) new_box_3_in;
double (*new_box_4)[320][320] = (double (*)[320][320]) new_box_4_in;
double (*old_box_0)[320][320] = (double (*)[320][320]) old_box_0_in;
double (*old_box_1)[320][320] = (double (*)[320][320]) old_box_1_in;
double (*old_box_2)[320][320] = (double (*)[320][320]) old_box_2_in;
double (*old_box_3)[320][320] = (double (*)[320][320]) old_box_3_in;
double (*old_box_4)[320][320] = (double (*)[320][320]) old_box_4_in;
double (*gx_0)[320][320] = (double (*)[320][320]) gx_0_in;
double (*gx_1)[320][320] = (double (*)[320][320]) gx_1_in;
double (*gx_2)[320][320] = (double (*)[320][320]) gx_2_in;
double (*gx_3)[320][320] = (double (*)[320][320]) gx_3_in;
double (*gx_4)[320][320] = (double (*)[320][320]) gx_4_in;
double (*gy_0)[320][320] = (double (*)[320][320]) gy_0_in;
double (*gy_1)[320][320] = (double (*)[320][320]) gy_1_in;
double (*gy_2)[320][320] = (double (*)[320][320]) gy_2_in;
double (*gy_3)[320][320] = (double (*)[320][320]) gy_3_in;
double (*gy_4)[320][320] = (double (*)[320][320]) gy_4_in;
double (*gz_0)[320][320] = (double (*)[320][320]) gz_0_in;
double (*gz_1)[320][320] = (double (*)[320][320]) gz_1_in;
double (*gz_2)[320][320] = (double (*)[320][320]) gz_2_in;
double (*gz_3)[320][320] = (double (*)[320][320]) gz_3_in;
double (*gz_4)[320][320] = (double (*)[320][320]) gz_4_in;
#pragma omp parallel for
for(int iz=0;iz<N;iz++){
for(int iy=0;iy<N;iy++){
for(int ix=2;ix<N-1;ix++){
gx_0[iz][iy][ix] = factor1*
(old_box_0[iz][iy][ix-2]+
7*(old_box_0[iz][iy][ix-1]+old_box_0[iz][iy][ix]) +
old_box_0[iz][iy][ix+1]);
gx_1[iz][iy][ix] = factor1*
(old_box_1[iz][iy][ix-2]+
7*(old_box_1[iz][iy][ix-1]+old_box_1[iz][iy][ix]) +
old_box_1[iz][iy][ix+1]);
gx_2[iz][iy][ix] = factor1*
(old_box_2[iz][iy][ix-2]+
7*(old_box_2[iz][iy][ix-1]+old_box_2[iz][iy][ix]) +
old_box_2[iz][iy][ix+1]);
gx_3[iz][iy][ix] = factor1*
(old_box_3[iz][iy][ix-2]+
7*(old_box_3[iz][iy][ix-1]+old_box_3[iz][iy][ix]) +
old_box_3[iz][iy][ix+1]);
gx_4[iz][iy][ix] = factor1*
(old_box_4[iz][iy][ix-2]+
7*(old_box_4[iz][iy][ix-1]+old_box_4[iz][iy][ix]) +
old_box_4[iz][iy][ix+1]);
}
}
}
for(int iz=0;iz<N;iz++){
for(int iy=0;iy<N;iy++){
for(int ix=0;ix<N;ix++){
gx_0[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
gx_1[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
gx_3[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
gx_4[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
gx_2[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
}
}
}
for(int iz=0;iz<N;iz++){
for(int iy=0;iy<N;iy++){
for(int ix=0;ix<N-1;ix++){
new_box_0[iz][iy][ix]+= gx_0[iz][iy][ix+1]-gx_0[iz][iy][ix];
new_box_1[iz][iy][ix]+= gx_1[iz][iy][ix+1]-gx_1[iz][iy][ix];
new_box_2[iz][iy][ix]+= gx_2[iz][iy][ix+1]-gx_2[iz][iy][ix];
new_box_3[iz][iy][ix]+= gx_3[iz][iy][ix+1]-gx_3[iz][iy][ix];
new_box_4[iz][iy][ix]+= gx_4[iz][iy][ix+1]-gx_4[iz][iy][ix];
}
}
}
//---------------------- y-direction
for(int iz=0;iz<N;iz++){
for(int iy=2;iy<N-1;iy++){
for(int ix=0;ix<N;ix++){
gy_0[iz][iy][ix] = factor1*
(old_box_0[iz][iy-2][ix]+
7*(old_box_0[iz][iy-1][ix]+old_box_0[iz][iy][ix]) +
old_box_0[iz][iy+1][ix]);
gy_1[iz][iy][ix] = factor1*
(old_box_1[iz][iy-2][ix]+
7*(old_box_1[iz][iy-1][ix]+old_box_1[iz][iy][ix]) +
old_box_1[iz][iy+1][ix]);
gy_2[iz][iy][ix] = factor1*
(old_box_2[iz][iy-2][ix]+
7*(old_box_2[iz][iy-1][ix]+old_box_2[iz][iy][ix]) +
old_box_2[iz][iy+1][ix]);
gy_3[iz][iy][ix] = factor1*
(old_box_3[iz][iy-2][ix]+
7*(old_box_3[iz][iy-1][ix]+old_box_3[iz][iy][ix]) +
old_box_3[iz][iy+1][ix]);
gy_4[iz][iy][ix] = factor1*
(old_box_4[iz][iy-2][ix]+
7*(old_box_4[iz][iy-1][ix]+old_box_4[iz][iy][ix]) +
old_box_4[iz][iy+1][ix]);
}
}
}
for(int iz=0;iz<N;iz++){
for(int iy=0;iy<N;iy++){
for(int ix=0;ix<N;ix++){
gy_0[iz][iy][ix] = factor2*gy_0[iz][iy][ix]*gy_3[iz][iy][ix];
gy_1[iz][iy][ix] = factor2*gy_1[iz][iy][ix]*gy_3[iz][iy][ix];
gy_2[iz][iy][ix] = factor2*gy_2[iz][iy][ix]*gy_3[iz][iy][ix];
gy_4[iz][iy][ix] = factor2*gy_4[iz][iy][ix]*gy_3[iz][iy][ix];
gy_3[iz][iy][ix] = factor2*gy_3[iz][iy][ix]*gy_3[iz][iy][ix];
}
}
}
for(int iz=0;iz<N;iz++){
for(int iy=0;iy<N-1;iy++){
for(int ix=0;ix<N;ix++){
new_box_0[iz][iy][ix]+= gy_0[iz][iy+1][ix]-gy_0[iz][iy][ix];
new_box_1[iz][iy][ix]+= gy_1[iz][iy+1][ix]-gy_1[iz][iy][ix];
new_box_2[iz][iy][ix]+= gy_2[iz][iy+1][ix]-gy_2[iz][iy][ix];
new_box_3[iz][iy][ix]+= gy_3[iz][iy+1][ix]-gy_3[iz][iy][ix];
new_box_4[iz][iy][ix]+= gy_4[iz][iy+1][ix]-gy_4[iz][iy][ix];
}
}
}
//---------------------- z-direction
for(int iz=2;iz<N-1;iz++){
for(int iy=0;iy<N;iy++){
for(int ix=0;ix<N;ix++){
gz_0[iz][iy][ix] = factor1*
(old_box_0[iz-2][iy][ix]+
7*(old_box_0[iz-1][iy][ix]+old_box_0[iz][iy][ix]) +
old_box_0[iz+1][iy][ix]);
gz_1[iz][iy][ix] = factor1*
(old_box_1[iz-2][iy][ix]+
7*(old_box_1[iz-1][iy][ix]+old_box_1[iz][iy][ix]) +
old_box_1[iz+1][iy][ix]);
gz_2[iz][iy][ix] = factor1*
(old_box_2[iz-2][iy][ix]+
7*(old_box_2[iz-1][iy][ix]+old_box_2[iz][iy][ix]) +
old_box_2[iz+1][iy][ix]);
gz_3[iz][iy][ix] = factor1*
(old_box_3[iz-2][iy][ix]+
7*(old_box_3[iz-1][iy][ix]+old_box_3[iz][iy][ix]) +
old_box_3[iz+1][iy][ix]);
gz_4[iz][iy][ix] = factor1*
(old_box_4[iz-2][iy][ix]+
7*(old_box_4[iz-1][iy][ix]+old_box_4[iz][iy][ix]) +
old_box_4[iz+1][iy][ix]);
}
}
}
for(int iz=0;iz<N;iz++){
for(int iy=0;iy<N;iy++){
for(int ix=0;ix<N;ix++){
gz_0[iz][iy][ix] = factor2*gz_0[iz][iy][ix]*gz_4[iz][iy][ix];
gz_1[iz][iy][ix] = factor2*gz_1[iz][iy][ix]*gz_4[iz][iy][ix];
gz_2[iz][iy][ix] = factor2*gz_2[iz][iy][ix]*gz_4[iz][iy][ix];
gz_3[iz][iy][ix] = factor2*gz_3[iz][iy][ix]*gz_4[iz][iy][ix];
gz_4[iz][iy][ix] = factor2*gz_4[iz][iy][ix]*gz_4[iz][iy][ix];
}
}
}
for(int iz=0;iz<N-1;iz++){
for(int iy=0;iy<N;iy++){
for(int ix=0;ix<N;ix++){
new_box_0[iz][iy][ix]+= gz_0[iz+1][iy][ix]-gz_0[iz][iy][ix];
new_box_1[iz][iy][ix]+= gz_1[iz+1][iy][ix]-gz_1[iz][iy][ix];
new_box_2[iz][iy][ix]+= gz_2[iz+1][iy][ix]-gz_2[iz][iy][ix];
new_box_3[iz][iy][ix]+= gz_3[iz+1][iy][ix]-gz_3[iz][iy][ix];
new_box_4[iz][iy][ix]+= gz_4[iz+1][iy][ix]-gz_4[iz][iy][ix];
}
}
}
}
|
test_intel.c | /*
Copyright 2012 Intel Corporation. All Rights Reserved.
The source code contained or described herein and all documents
related to the source code ("Material") are owned by Intel Corporation
or its suppliers or licensors. Title to the Material remains with
Intel Corporation or its suppliers and licensors. The Material is
protected by worldwide copyright and trade secret laws and treaty
provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or
disclosed in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other
intellectual property right is granted to or conferred upon you by
disclosure or delivery of the Materials, either expressly, by
implication, inducement, estoppel or otherwise. Any license under
such intellectual property rights must be express and approved by
Intel in writing.
*/
/*
This is a hello world program utilizing both MPI and OpenMP.
In order to coordinate output, all output is handled by the master
process. Within the master process, first, each thread says hello.
Once this is completed, the master thread waits for MPI sends from
each of the other processes. The first piece of data is how many
threads the process has. This is sent by the master thread of the
remote process. Then, each thread will send a thread ID, process
rank, and processor name to the master process. This will then be
formatted and sent to standard output as a hello from the sending
thread.
*/
// Include the MPI header <mpi.h> and the OpenMP header <omp.h>
// The MPI header should be included before stdio.h.
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
/* MPI + OpenMP hello-world: rank 0 prints greetings for its own threads and
   then for every thread of every other rank, using this tag protocol:
     10*r           -> thread count of rank r (sent by r's master thread)
     10*r+1, 10*r+2 -> rank ID and thread ID of one thread of rank r
     1000*r+10*t    -> processor-name length for thread t of rank r
     1000*r+10*t+1  -> processor name itself (dNamelen+1 chars incl. NUL) */
int main(int argc, char* argv[]) {
  int rank;                  // Rank ID of the current process
  int nproc;                 // Total number of processes
  int nthreads;              // Total number of threads
  int threadID;              // ID of the current thread
  int namelen;               // Length of the processor name
  int required=MPI_THREAD_SERIALIZED;  // Required level of MPI threading support
  /* Each thread will call MPI routines, but these calls will be coordinated
     to occur only one at a time within a process.
  */
  int provided;              // Provided level of MPI threading support
  char name[MPI_MAX_PROCESSOR_NAME];   // Name of the processor
  int dThread;               // Display thread ID
  int dRank;                 // Display rank ID
  int dNamelen;              // Length of display name
  char dName[MPI_MAX_PROCESSOR_NAME];  // Display processor name
  int sNthreads;             // nthreads from sender
  MPI_Status stat;           // Status from MPI calls
  int r;                     // Rank loop counter
  int t;                     // Thread loop counter
  // Initialize MPI with threading
  MPI_Init_thread(&argc, &argv, required, &provided);
  // Determine the MPI rank, number of processes, and processor name
  MPI_Comm_size(MPI_COMM_WORLD,&nproc);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Get_processor_name(name,&namelen);
  // Check the threading support level
  if (provided < required) {
    // Insufficient support, degrade to 1 thread and warn the user
    if (rank == 0) {
      printf("Warning: This MPI implementation provides insufficient");
      printf(" threading support.\n");
    }
    omp_set_num_threads(1);
  }
  // The multithreaded section where all threads will say hello
  #pragma omp parallel default(shared) private(threadID)
  {
    // All processes should get the total number of threads, each
    // threads needs to know its own ID.
    threadID=omp_get_thread_num();   // Get the thread ID
    // NOTE(review): nthreads is shared and written by every thread; all write
    // the same value, so this is benign in practice, but it is formally a
    // data race -- consider writing it from a single thread.
    nthreads=omp_get_num_threads();  // Get the total number of threads
    // Time to say hello, the master process performs all output.
    // Within the master process, each thread will handle its own
    // output, the master thread will handle output from all threads
    // of all other processes.
    if (rank == 0) {
      // The master process outputs from its own threads
      // This section is done by every OpenMP thread, but only one at a time.
      // This requires MPI_THREAD_SERIALIZED.
      #pragma omp critical
      {
        printf("Hello from thread %d of %d in rank %d of %d on %s.\n",
               threadID, nthreads, rank, nproc, name);
      } // End of #pragma omp critical
      // The rank==0 condition is uniform across the team, so every thread of
      // this process reaches the barrier (required for a legal barrier).
      #pragma omp barrier
      // Now, receive data from each of the other processes and
      // give an appropriate greeting. Only the master thread
      // should do this. Since only the master thread is calling
      // MPI, this is an example of MPI_THREAD_FUNNELED.
      #pragma omp master
      {
        for (r=1;r<nproc;r++) {
          // Get the number of threads in the sender
          MPI_Recv(&sNthreads, 1, MPI_INT, r, 10*r, MPI_COMM_WORLD, &stat);
          for (t=0;t<sNthreads;t++) {
            // For each thread, get the rank ID, thread ID, and name
            MPI_Recv(&dRank, 1, MPI_INT, r, 10*r+1, MPI_COMM_WORLD, &stat);
            MPI_Recv(&dThread, 1, MPI_INT, r, 10*r+2, MPI_COMM_WORLD, &stat);
            MPI_Recv(&dNamelen, 1, MPI_INT, r, 1000*r+10*dThread, MPI_COMM_WORLD, &stat);
            MPI_Recv(dName, dNamelen+1, MPI_CHAR, r, 1000*r+10*dThread+1, MPI_COMM_WORLD, &stat);
            printf("Hello from thread %d of %d in rank %d of %d on %s.\n",
                   dThread, sNthreads, dRank, nproc, dName);
          }
        }
      } // End of #pragma omp master
    } else { // All other processes will send their data to the master
      // Only the master sends the number of threads. MPI_THREAD_FUNNELED
      #pragma omp master
      {
        MPI_Send(&nthreads, 1, MPI_INT, 0, 10*rank, MPI_COMM_WORLD);
      } // End of #pragma omp master
      #pragma omp critical
      {
        // Each thread will send its own data, but there is no
        // particular order required, so a critical section works
        // exactly as needed. As such, this requires MPI_THREAD_SERIALIZED
        MPI_Send(&rank, 1, MPI_INT, 0, 10*rank+1, MPI_COMM_WORLD);
        MPI_Send(&threadID, 1, MPI_INT, 0, 10*rank+2, MPI_COMM_WORLD);
        MPI_Send(&namelen, 1, MPI_INT, 0, 1000*rank+10*threadID, MPI_COMM_WORLD);
        MPI_Send(name, namelen+1, MPI_CHAR, 0, 1000*rank+10*threadID+1, MPI_COMM_WORLD);
      } // End of #pragma omp critical
    }
  } // End of #pragma omp parallel
  // Close out MPI and the program
  MPI_Finalize();
  return 0;
}
|
ex2.c | #include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#define THREADS 5
/*
Calculating the value of PI using the approximation
integral from 0 to 1 of 4/(1+x^2) dx = PI,
using OpenMP, of course.
*/
/*
 * Midpoint-rule estimate of PI = integral_0^1 4/(1+x^2) dx, parallelized
 * with OpenMP: each thread accumulates a private partial sum in psum[id]
 * (cyclic distribution of samples), and the main thread reduces them.
 *
 * Fixes over the original:
 *  - psum is accumulated with "+=", so it must start at zero: use calloc
 *    instead of malloc (reading malloc'd memory is undefined behavior).
 *  - the allocation result is checked before use.
 *  - psum is freed before exit.
 */
int main(){
    int num_steps = 10000,tsize;
    double step = 1.0 /( (double) num_steps),PI = 0;
    double* psum = calloc(THREADS, sizeof(double)); /* zero-initialized partial sums */
    if (psum == NULL) {
        fprintf(stderr, "allocation failure\n");
        return 1;
    }
    double start = omp_get_wtime();
    #pragma omp parallel num_threads(THREADS)
    {
        int id = omp_get_thread_num();
        int size = omp_get_num_threads();  /* runtime may grant fewer than THREADS */
        if(id == 0) tsize=size;            /* remember the actual team size */
        /* cyclic (round-robin) distribution of the midpoint samples */
        for(int i=id;i<num_steps;i+=size){
            double x = (0.5+i)*step;
            double f = 4.0 /( (double) 1+x*x);
            psum[id]+=f;
        }
    }
    double time = omp_get_wtime()-start;
    for(int i=0;i<tsize;i++) PI+=psum[i];  /* serial reduction of partial sums */
    PI*=step;
    printf("PI=%lf took around %lf\n",PI,time);
    free(psum);
    return 0;
}
|
resample_utils.h | /*
Copyright 2020 MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#pragma once
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// We need to define AT_PARALLEL_OPENMP (even if -fopenmp is
// not used) so that at::parallel_for is defined somewhere.
// This must be done before <ATen/Parallel.h> is included.
//
// Note that if AT_PARALLEL_OPENMP = 1 but compilation does not use
// -fopenmp, omp pragmas will be ignored. In that case, the code will
// be effectively sequential, and we don't have to worry about
// operations being atomic.
#if !(AT_PARALLEL_OPENMP)
#if !(AT_PARALLEL_NATIVE)
#if !(AT_PARALLEL_NATIVE_TBB)
#error No parallel backend specified
#endif
#endif
#endif
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// These are defines that help writing generic code for both GPU and CPU
#ifdef __CUDACC__
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <THC/THCAtomics.cuh>
#define MONAI_INLINE __forceinline__
#define MONAI_DEVICE __device__
#define MONAI_HOST __host__
#define MONAI_ATOMIC_ADD monai::gpuAtomicAdd
#define MONAI_NAMESPACE_DEVICE namespace cuda
namespace monai {
// atomicAdd API changed between pytorch 1.4 and 1.5.
// Device-side atomic add of `value` into ptr[offset].
// Dispatches to the correct PyTorch/CUDA helper: the type-generic wrapper
// was renamed to gpuAtomicAdd between pytorch 1.4 and 1.5, so the macro
// MONAI_TORCH_VERSION selects which symbol to call.
template <typename scalar_t, typename offset_t>
static __forceinline__ __device__ void gpuAtomicAdd(scalar_t* ptr, offset_t offset, scalar_t value) {
#if MONAI_TORCH_VERSION >= 10500
// pytorch >= 1.5: use the type-generic ::gpuAtomicAdd wrapper
::gpuAtomicAdd(ptr + offset, value);
#else
// pytorch < 1.5: plain CUDA/THC ::atomicAdd
::atomicAdd(ptr + offset, value);
#endif
}
} // namespace monai
#else
#define MONAI_INLINE inline
#define MONAI_DEVICE
#define MONAI_HOST
#define MONAI_ATOMIC_ADD monai::cpuAtomicAdd
#define MONAI_NAMESPACE_DEVICE namespace cpu
namespace monai {
// CPU-side counterpart of gpuAtomicAdd: adds `value` into ptr[offset].
// When the OpenMP backend is active, the update is protected by an
// "omp atomic" pragma; otherwise (per the note at the top of this file)
// execution is effectively sequential and a plain update suffices.
template <typename scalar_t, typename offset_t>
static inline void cpuAtomicAdd(scalar_t* ptr, offset_t offset, scalar_t value) {
  scalar_t* target = ptr + offset;
#if AT_PARALLEL_OPENMP
#pragma omp atomic
#endif
  *target += value;
}
} // namespace monai
#endif
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <ATen/ATen.h>
namespace monai {
// Out-of-bounds behavior used when sampling outside the field of view.
// The names mirror the boundary conditions of the corresponding discrete
// transforms (DCT/DST/DFT).
enum class BoundType : int64_t {
Replicate, // Replicate last inbound value = clip coordinates
DCT1, // Symmetric w.r.t. center of the last inbound voxel
DCT2, // Symmetric w.r.t. edge of the last inbound voxel (=Neuman)
DST1, // Asymmetric w.r.t. center of the last inbound voxel
DST2, // Asymmetric w.r.t. edge of the last inbound voxel (=Dirichlet)
DFT, // Circular / Wrap around the FOV
Sliding, // For deformation-fields only: mixture of DCT2 and DST2
Zero, // Zero outside of the FOV
NoCheck // /!\ Checks disabled: assume coordinates are inbound
};
// Convenience alias for passing a per-dimension list of bounds.
using BoundVectorRef = c10::ArrayRef<BoundType>;
// Spline interpolation order. Enumerators start at 0 (Nearest) and
// increase by one per order, up to SeventhOrder = 7.
enum class InterpolationType : int64_t {
Nearest,
Linear,
Quadratic,
Cubic,
FourthOrder,
FifthOrder,
SixthOrder,
SeventhOrder
};
// Convenience alias for passing a per-dimension list of orders.
using InterpolationVectorRef = c10::ArrayRef<InterpolationType>;
// Stream a human-readable name for a BoundType value.
// Unrecognized values print "Unknown bound".
static MONAI_INLINE MONAI_HOST std::ostream& operator<<(std::ostream& os, const BoundType& bound) {
  const char* name = "Unknown bound";
  switch (bound) {
    case BoundType::Replicate: name = "Replicate"; break;
    case BoundType::DCT1:      name = "DCT1";      break;
    case BoundType::DCT2:      name = "DCT2";      break;
    case BoundType::DST1:      name = "DST1";      break;
    case BoundType::DST2:      name = "DST2";      break;
    case BoundType::DFT:       name = "DFT";       break;
    case BoundType::Zero:      name = "Zero";      break;
    case BoundType::Sliding:   name = "Sliding";   break;
    case BoundType::NoCheck:   name = "NoCheck";   break;
  }
  return os << name;
}
// Stream a human-readable name for an InterpolationType value.
// Unrecognized values print "Unknown interpolation order".
static MONAI_INLINE MONAI_HOST std::ostream& operator<<(std::ostream& os, const InterpolationType& itp) {
  const char* name = "Unknown interpolation order";
  switch (itp) {
    case InterpolationType::Nearest:      name = "Nearest";      break;
    case InterpolationType::Linear:       name = "Linear";       break;
    case InterpolationType::Quadratic:    name = "Quadratic";    break;
    case InterpolationType::Cubic:        name = "Cubic";        break;
    case InterpolationType::FourthOrder:  name = "FourthOrder";  break;
    case InterpolationType::FifthOrder:   name = "FifthOrder";   break;
    case InterpolationType::SixthOrder:   name = "SixthOrder";   break;
    case InterpolationType::SeventhOrder: name = "SeventhOrder"; break;
  }
  return os << name;
}
} // namespace monai
|
nested.c | #include <stdio.h>
#include <omp.h>
/* Tiny OpenMP demo: enable nested parallelism, then print the value
 * reported by omp_get_nested() twice -- once from serial code and once
 * from inside a parallel region (master thread only). Both prints
 * should show 1 when the runtime honors the request.
 * NOTE(review): omp_set_nested/omp_get_nested are deprecated since
 * OpenMP 5.0 in favor of omp_set_max_active_levels. */
int main( )
{
    omp_set_nested(1);       /* request nested parallelism */
    omp_set_num_threads(4);
    printf("%d\n", omp_get_nested( ));
    #pragma omp parallel
    {
        #pragma omp master
        {
            printf("%d\n", omp_get_nested( ));
        }
    }
}
|
ast-dump-openmp-begin-declare-variant_12.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX
// expected-no-diagnostics
#ifdef __cplusplus
#define OVERLOADABLE
#else
#define OVERLOADABLE __attribute__((overloadable))
#endif
OVERLOADABLE
int also_before(void) { // base overload: replaced by the llvm variant below
return 1;
}
OVERLOADABLE
int also_before(int i) { // base overload: replaced by the llvm variant below
return 2;
}
OVERLOADABLE
int also_before(float f) { // base overload: NO llvm variant exists, so this one is always called
return 0;
}
OVERLOADABLE
int also_before(double d) { // base overload: replaced by the llvm variant below
return 3;
}
OVERLOADABLE
int also_before(long l) { // base overload: replaced by the llvm variant below
return 4;
}
#pragma omp begin declare variant match(implementation = {vendor(llvm)})
OVERLOADABLE
int also_before(void) { // llvm specialization: returns 0
return 0;
}
OVERLOADABLE
int also_before(int i) { // llvm specialization: returns 0
return 0;
}
// No float!
OVERLOADABLE
int also_before(double d) { // llvm specialization: returns 0
return 0;
}
OVERLOADABLE
int also_before(long l) { // llvm specialization: returns 0
return 0;
}
#pragma omp end declare variant
int main() { // each call resolves to an llvm variant (or the 0-returning float base)
// Should return 0.
return also_before() + also_before(1) + also_before(2.0f) + also_before(3.0) + also_before(4L);
}
// Make sure:
// - we see the specialization in the AST
// - we pick the right callees
// C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:14:1> line:12:5 used also_before 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:14:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:13:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1
// C-NEXT: | |-OverloadableAttr [[ADDR_4:0x[a-z0-9]*]] <line:8:37>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_5:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_6:0x[a-z0-9]*]] <col:22> 'int ({{.*}})' Function [[ADDR_7:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_8:0x[a-z0-9]*]] <col:22, line:18:1> line:16:5 used also_before 'int (int)'
// C-NEXT: | |-ParmVarDecl [[ADDR_9:0x[a-z0-9]*]] <col:17, col:21> col:21 i 'int'
// C-NEXT: | |-CompoundStmt [[ADDR_10:0x[a-z0-9]*]] <col:24, line:18:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_11:0x[a-z0-9]*]] <line:17:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_12:0x[a-z0-9]*]] <col:10> 'int' 2
// C-NEXT: | |-OverloadableAttr [[ADDR_13:0x[a-z0-9]*]] <line:8:37>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_14:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_15:0x[a-z0-9]*]] <col:22> 'int (int)' Function [[ADDR_16:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (int)'
// C-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] <col:22, line:22:1> line:20:5 used also_before 'int (float)'
// C-NEXT: | |-ParmVarDecl [[ADDR_18:0x[a-z0-9]*]] <col:17, col:23> col:23 f 'float'
// C-NEXT: | |-CompoundStmt [[ADDR_19:0x[a-z0-9]*]] <col:26, line:22:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_20:0x[a-z0-9]*]] <line:21:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_21:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OverloadableAttr [[ADDR_22:0x[a-z0-9]*]] <line:8:37>
// C-NEXT: |-FunctionDecl [[ADDR_23:0x[a-z0-9]*]] <col:22, line:26:1> line:24:5 used also_before 'int (double)'
// C-NEXT: | |-ParmVarDecl [[ADDR_24:0x[a-z0-9]*]] <col:17, col:24> col:24 d 'double'
// C-NEXT: | |-CompoundStmt [[ADDR_25:0x[a-z0-9]*]] <col:27, line:26:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_26:0x[a-z0-9]*]] <line:25:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_27:0x[a-z0-9]*]] <col:10> 'int' 3
// C-NEXT: | |-OverloadableAttr [[ADDR_28:0x[a-z0-9]*]] <line:8:37>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_29:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_30:0x[a-z0-9]*]] <col:22> 'int (double)' Function [[ADDR_31:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (double)'
// C-NEXT: |-FunctionDecl [[ADDR_32:0x[a-z0-9]*]] <col:22, line:30:1> line:28:5 used also_before 'int (long)'
// C-NEXT: | |-ParmVarDecl [[ADDR_33:0x[a-z0-9]*]] <col:17, col:22> col:22 l 'long'
// C-NEXT: | |-CompoundStmt [[ADDR_34:0x[a-z0-9]*]] <col:25, line:30:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_35:0x[a-z0-9]*]] <line:29:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:10> 'int' 4
// C-NEXT: | |-OverloadableAttr [[ADDR_37:0x[a-z0-9]*]] <line:8:37>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_38:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:22> 'int (long)' Function [[ADDR_40:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (long)'
// C-NEXT: |-FunctionDecl [[ADDR_7]] <col:22, line:36:1> line:8:22 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_41:0x[a-z0-9]*]] <line:34:23, line:36:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_42:0x[a-z0-9]*]] <line:35:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OverloadableAttr [[ADDR_44:0x[a-z0-9]*]] <line:8:37>
// C-NEXT: |-FunctionDecl [[ADDR_16]] <col:22, line:40:1> line:8:22 also_before[implementation={vendor(llvm)}] 'int (int)'
// C-NEXT: | |-ParmVarDecl [[ADDR_45:0x[a-z0-9]*]] <line:38:17, col:21> col:21 i 'int'
// C-NEXT: | |-CompoundStmt [[ADDR_46:0x[a-z0-9]*]] <col:24, line:40:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_47:0x[a-z0-9]*]] <line:39:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_48:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OverloadableAttr [[ADDR_49:0x[a-z0-9]*]] <line:8:37>
// C-NEXT: |-FunctionDecl [[ADDR_31]] <col:22, line:45:1> line:8:22 also_before[implementation={vendor(llvm)}] 'int (double)'
// C-NEXT: | |-ParmVarDecl [[ADDR_50:0x[a-z0-9]*]] <line:43:17, col:24> col:24 d 'double'
// C-NEXT: | |-CompoundStmt [[ADDR_51:0x[a-z0-9]*]] <col:27, line:45:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_52:0x[a-z0-9]*]] <line:44:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_53:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OverloadableAttr [[ADDR_54:0x[a-z0-9]*]] <line:8:37>
// C-NEXT: |-FunctionDecl [[ADDR_40]] <col:22, line:49:1> line:8:22 also_before[implementation={vendor(llvm)}] 'int (long)'
// C-NEXT: | |-ParmVarDecl [[ADDR_55:0x[a-z0-9]*]] <line:47:17, col:22> col:22 l 'long'
// C-NEXT: | |-CompoundStmt [[ADDR_56:0x[a-z0-9]*]] <col:25, line:49:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_57:0x[a-z0-9]*]] <line:48:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_58:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OverloadableAttr [[ADDR_59:0x[a-z0-9]*]] <line:8:37>
// C-NEXT: `-FunctionDecl [[ADDR_60:0x[a-z0-9]*]] <line:53:1, line:56:1> line:53:5 main 'int ({{.*}})'
// C-NEXT: `-CompoundStmt [[ADDR_61:0x[a-z0-9]*]] <col:12, line:56:1>
// C-NEXT: `-ReturnStmt [[ADDR_62:0x[a-z0-9]*]] <line:55:3, col:96>
// C-NEXT: `-BinaryOperator [[ADDR_63:0x[a-z0-9]*]] <col:10, col:96> 'int' '+'
// C-NEXT: |-BinaryOperator [[ADDR_64:0x[a-z0-9]*]] <col:10, col:78> 'int' '+'
// C-NEXT: | |-BinaryOperator [[ADDR_65:0x[a-z0-9]*]] <col:10, col:59> 'int' '+'
// C-NEXT: | | |-BinaryOperator [[ADDR_66:0x[a-z0-9]*]] <col:10, col:39> 'int' '+'
// C-NEXT: | | | |-PseudoObjectExpr [[ADDR_67:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C-NEXT: | | | | |-CallExpr [[ADDR_68:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C-NEXT: | | | | | `-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | | | `-DeclRefExpr [[ADDR_70:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// C-NEXT: | | | | `-CallExpr [[ADDR_71:0x[a-z0-9]*]] <line:8:22, line:55:22> 'int'
// C-NEXT: | | | | `-ImplicitCastExpr [[ADDR_72:0x[a-z0-9]*]] <line:8:22> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | | `-DeclRefExpr [[ADDR_6]] <col:22> 'int ({{.*}})' Function [[ADDR_7]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: | | | `-PseudoObjectExpr [[ADDR_73:0x[a-z0-9]*]] <line:55:26, col:39> 'int'
// C-NEXT: | | | |-CallExpr [[ADDR_74:0x[a-z0-9]*]] <col:26, col:39> 'int'
// C-NEXT: | | | | |-ImplicitCastExpr [[ADDR_75:0x[a-z0-9]*]] <col:26> 'int (*)(int)' <FunctionToPointerDecay>
// C-NEXT: | | | | | `-DeclRefExpr [[ADDR_76:0x[a-z0-9]*]] <col:26> 'int (int)' {{.*}}Function [[ADDR_8]] 'also_before' 'int (int)'
// C-NEXT: | | | | `-IntegerLiteral [[ADDR_77:0x[a-z0-9]*]] <col:38> 'int' 1
// C-NEXT: | | | `-CallExpr [[ADDR_78:0x[a-z0-9]*]] <line:8:22, line:55:39> 'int'
// C-NEXT: | | | |-ImplicitCastExpr [[ADDR_79:0x[a-z0-9]*]] <line:8:22> 'int (*)(int)' <FunctionToPointerDecay>
// C-NEXT: | | | | `-DeclRefExpr [[ADDR_15]] <col:22> 'int (int)' Function [[ADDR_16]] 'also_before[implementation={vendor(llvm)}]' 'int (int)'
// C-NEXT: | | | `-IntegerLiteral [[ADDR_77]] <line:55:38> 'int' 1
// C-NEXT: | | `-CallExpr [[ADDR_80:0x[a-z0-9]*]] <col:43, col:59> 'int'
// C-NEXT: | | |-ImplicitCastExpr [[ADDR_81:0x[a-z0-9]*]] <col:43> 'int (*)(float)' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_82:0x[a-z0-9]*]] <col:43> 'int (float)' {{.*}}Function [[ADDR_17]] 'also_before' 'int (float)'
// C-NEXT: | | `-FloatingLiteral [[ADDR_83:0x[a-z0-9]*]] <col:55> 'float' 2.000000e+00
// C-NEXT: | `-PseudoObjectExpr [[ADDR_84:0x[a-z0-9]*]] <col:63, col:78> 'int'
// C-NEXT: | |-CallExpr [[ADDR_85:0x[a-z0-9]*]] <col:63, col:78> 'int'
// C-NEXT: | | |-ImplicitCastExpr [[ADDR_86:0x[a-z0-9]*]] <col:63> 'int (*)(double)' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_87:0x[a-z0-9]*]] <col:63> 'int (double)' {{.*}}Function [[ADDR_23]] 'also_before' 'int (double)'
// C-NEXT: | | `-FloatingLiteral [[ADDR_88:0x[a-z0-9]*]] <col:75> 'double' 3.000000e+00
// C-NEXT: | `-CallExpr [[ADDR_89:0x[a-z0-9]*]] <line:8:22, line:55:78> 'int'
// C-NEXT: | |-ImplicitCastExpr [[ADDR_90:0x[a-z0-9]*]] <line:8:22> 'int (*)(double)' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_30]] <col:22> 'int (double)' Function [[ADDR_31]] 'also_before[implementation={vendor(llvm)}]' 'int (double)'
// C-NEXT: | `-FloatingLiteral [[ADDR_88]] <line:55:75> 'double' 3.000000e+00
// C-NEXT: `-PseudoObjectExpr [[ADDR_91:0x[a-z0-9]*]] <col:82, col:96> 'int'
// C-NEXT: |-CallExpr [[ADDR_92:0x[a-z0-9]*]] <col:82, col:96> 'int'
// C-NEXT: | |-ImplicitCastExpr [[ADDR_93:0x[a-z0-9]*]] <col:82> 'int (*)(long)' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_94:0x[a-z0-9]*]] <col:82> 'int (long)' {{.*}}Function [[ADDR_32]] 'also_before' 'int (long)'
// C-NEXT: | `-IntegerLiteral [[ADDR_95:0x[a-z0-9]*]] <col:94> 'long' 4
// C-NEXT: `-CallExpr [[ADDR_96:0x[a-z0-9]*]] <line:8:22, line:55:96> 'int'
// C-NEXT: |-ImplicitCastExpr [[ADDR_97:0x[a-z0-9]*]] <line:8:22> 'int (*)(long)' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_39]] <col:22> 'int (long)' Function [[ADDR_40]] 'also_before[implementation={vendor(llvm)}]' 'int (long)'
// C-NEXT: `-IntegerLiteral [[ADDR_95]] <line:55:94> 'long' 4
// CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:14:1> line:12:5 used also_before 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:14:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:13:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:34:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:16:1, line:18:1> line:16:5 used also_before 'int (int)'
// CXX-NEXT: | |-ParmVarDecl [[ADDR_8:0x[a-z0-9]*]] <col:17, col:21> col:21 i 'int'
// CXX-NEXT: | |-CompoundStmt [[ADDR_9:0x[a-z0-9]*]] <col:24, line:18:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_10:0x[a-z0-9]*]] <line:17:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_11:0x[a-z0-9]*]] <col:10> 'int' 2
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_12:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_13:0x[a-z0-9]*]] <line:38:1> 'int (int)' Function [[ADDR_14:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (int)'
// CXX-NEXT: |-FunctionDecl [[ADDR_15:0x[a-z0-9]*]] <line:20:1, line:22:1> line:20:5 used also_before 'int (float)'
// CXX-NEXT: | |-ParmVarDecl [[ADDR_16:0x[a-z0-9]*]] <col:17, col:23> col:23 f 'float'
// CXX-NEXT: | `-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <col:26, line:22:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:21:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_20:0x[a-z0-9]*]] <line:24:1, line:26:1> line:24:5 used also_before 'int (double)'
// CXX-NEXT: | |-ParmVarDecl [[ADDR_21:0x[a-z0-9]*]] <col:17, col:24> col:24 d 'double'
// CXX-NEXT: | |-CompoundStmt [[ADDR_22:0x[a-z0-9]*]] <col:27, line:26:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_23:0x[a-z0-9]*]] <line:25:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_24:0x[a-z0-9]*]] <col:10> 'int' 3
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_25:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_26:0x[a-z0-9]*]] <line:43:1> 'int (double)' Function [[ADDR_27:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (double)'
// CXX-NEXT: |-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] <line:28:1, line:30:1> line:28:5 used also_before 'int (long)'
// CXX-NEXT: | |-ParmVarDecl [[ADDR_29:0x[a-z0-9]*]] <col:17, col:22> col:22 l 'long'
// CXX-NEXT: | |-CompoundStmt [[ADDR_30:0x[a-z0-9]*]] <col:25, line:30:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_31:0x[a-z0-9]*]] <line:29:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_32:0x[a-z0-9]*]] <col:10> 'int' 4
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_33:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_34:0x[a-z0-9]*]] <line:47:1> 'int (long)' Function [[ADDR_35:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int (long)'
// CXX-NEXT: |-FunctionDecl [[ADDR_6]] <line:34:1, line:36:1> line:34:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_36:0x[a-z0-9]*]] <col:23, line:36:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_37:0x[a-z0-9]*]] <line:35:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_38:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_14]] <line:38:1, line:40:1> line:38:1 also_before[implementation={vendor(llvm)}] 'int (int)'
// CXX-NEXT: | |-ParmVarDecl [[ADDR_39:0x[a-z0-9]*]] <col:17, col:21> col:21 i 'int'
// CXX-NEXT: | `-CompoundStmt [[ADDR_40:0x[a-z0-9]*]] <col:24, line:40:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_41:0x[a-z0-9]*]] <line:39:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_42:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_27]] <line:43:1, line:45:1> line:43:1 also_before[implementation={vendor(llvm)}] 'int (double)'
// CXX-NEXT: | |-ParmVarDecl [[ADDR_43:0x[a-z0-9]*]] <col:17, col:24> col:24 d 'double'
// CXX-NEXT: | `-CompoundStmt [[ADDR_44:0x[a-z0-9]*]] <col:27, line:45:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_45:0x[a-z0-9]*]] <line:44:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_46:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_35]] <line:47:1, line:49:1> line:47:1 also_before[implementation={vendor(llvm)}] 'int (long)'
// CXX-NEXT: | |-ParmVarDecl [[ADDR_47:0x[a-z0-9]*]] <col:17, col:22> col:22 l 'long'
// CXX-NEXT: | `-CompoundStmt [[ADDR_48:0x[a-z0-9]*]] <col:25, line:49:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_49:0x[a-z0-9]*]] <line:48:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_50:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: `-FunctionDecl [[ADDR_51:0x[a-z0-9]*]] <line:53:1, line:56:1> line:53:5 main 'int ({{.*}})'
// CXX-NEXT: `-CompoundStmt [[ADDR_52:0x[a-z0-9]*]] <col:12, line:56:1>
// CXX-NEXT: `-ReturnStmt [[ADDR_53:0x[a-z0-9]*]] <line:55:3, col:96>
// CXX-NEXT: `-BinaryOperator [[ADDR_54:0x[a-z0-9]*]] <col:10, col:96> 'int' '+'
// CXX-NEXT: |-BinaryOperator [[ADDR_55:0x[a-z0-9]*]] <col:10, col:78> 'int' '+'
// CXX-NEXT: | |-BinaryOperator [[ADDR_56:0x[a-z0-9]*]] <col:10, col:59> 'int' '+'
// CXX-NEXT: | | |-BinaryOperator [[ADDR_57:0x[a-z0-9]*]] <col:10, col:39> 'int' '+'
// CXX-NEXT: | | | |-PseudoObjectExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX-NEXT: | | | | |-CallExpr [[ADDR_59:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX-NEXT: | | | | | `-ImplicitCastExpr [[ADDR_60:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | | | `-DeclRefExpr [[ADDR_61:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CXX-NEXT: | | | | `-CallExpr [[ADDR_62:0x[a-z0-9]*]] <line:34:1, line:55:22> 'int'
// CXX-NEXT: | | | | `-ImplicitCastExpr [[ADDR_63:0x[a-z0-9]*]] <line:34:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: | | | `-PseudoObjectExpr [[ADDR_64:0x[a-z0-9]*]] <line:55:26, col:39> 'int'
// CXX-NEXT: | | | |-CallExpr [[ADDR_65:0x[a-z0-9]*]] <col:26, col:39> 'int'
// CXX-NEXT: | | | | |-ImplicitCastExpr [[ADDR_66:0x[a-z0-9]*]] <col:26> 'int (*)(int)' <FunctionToPointerDecay>
// CXX-NEXT: | | | | | `-DeclRefExpr [[ADDR_67:0x[a-z0-9]*]] <col:26> 'int (int)' {{.*}}Function [[ADDR_7]] 'also_before' 'int (int)'
// CXX-NEXT: | | | | `-IntegerLiteral [[ADDR_68:0x[a-z0-9]*]] <col:38> 'int' 1
// CXX-NEXT: | | | `-CallExpr [[ADDR_69:0x[a-z0-9]*]] <line:38:1, line:55:39> 'int'
// CXX-NEXT: | | | |-ImplicitCastExpr [[ADDR_70:0x[a-z0-9]*]] <line:38:1> 'int (*)(int)' <FunctionToPointerDecay>
// CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_13]] <col:1> 'int (int)' Function [[ADDR_14]] 'also_before[implementation={vendor(llvm)}]' 'int (int)'
// CXX-NEXT: | | | `-IntegerLiteral [[ADDR_68]] <line:55:38> 'int' 1
// CXX-NEXT: | | `-CallExpr [[ADDR_71:0x[a-z0-9]*]] <col:43, col:59> 'int'
// CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_72:0x[a-z0-9]*]] <col:43> 'int (*)(float)' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_73:0x[a-z0-9]*]] <col:43> 'int (float)' {{.*}}Function [[ADDR_15]] 'also_before' 'int (float)'
// CXX-NEXT: | | `-FloatingLiteral [[ADDR_74:0x[a-z0-9]*]] <col:55> 'float' 2.000000e+00
// CXX-NEXT: | `-PseudoObjectExpr [[ADDR_75:0x[a-z0-9]*]] <col:63, col:78> 'int'
// CXX-NEXT: | |-CallExpr [[ADDR_76:0x[a-z0-9]*]] <col:63, col:78> 'int'
// CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_77:0x[a-z0-9]*]] <col:63> 'int (*)(double)' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_78:0x[a-z0-9]*]] <col:63> 'int (double)' {{.*}}Function [[ADDR_20]] 'also_before' 'int (double)'
// CXX-NEXT: | | `-FloatingLiteral [[ADDR_79:0x[a-z0-9]*]] <col:75> 'double' 3.000000e+00
// CXX-NEXT: | `-CallExpr [[ADDR_80:0x[a-z0-9]*]] <line:43:1, line:55:78> 'int'
// CXX-NEXT: | |-ImplicitCastExpr [[ADDR_81:0x[a-z0-9]*]] <line:43:1> 'int (*)(double)' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_26]] <col:1> 'int (double)' Function [[ADDR_27]] 'also_before[implementation={vendor(llvm)}]' 'int (double)'
// CXX-NEXT: | `-FloatingLiteral [[ADDR_79]] <line:55:75> 'double' 3.000000e+00
// CXX-NEXT: `-PseudoObjectExpr [[ADDR_82:0x[a-z0-9]*]] <col:82, col:96> 'int'
// CXX-NEXT: |-CallExpr [[ADDR_83:0x[a-z0-9]*]] <col:82, col:96> 'int'
// CXX-NEXT: | |-ImplicitCastExpr [[ADDR_84:0x[a-z0-9]*]] <col:82> 'int (*)(long)' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_85:0x[a-z0-9]*]] <col:82> 'int (long)' {{.*}}Function [[ADDR_28]] 'also_before' 'int (long)'
// CXX-NEXT: | `-IntegerLiteral [[ADDR_86:0x[a-z0-9]*]] <col:94> 'long' 4
// CXX-NEXT: `-CallExpr [[ADDR_87:0x[a-z0-9]*]] <line:47:1, line:55:96> 'int'
// CXX-NEXT: |-ImplicitCastExpr [[ADDR_88:0x[a-z0-9]*]] <line:47:1> 'int (*)(long)' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_34]] <col:1> 'int (long)' Function [[ADDR_35]] 'also_before[implementation={vendor(llvm)}]' 'int (long)'
// CXX-NEXT: `-IntegerLiteral [[ADDR_86]] <line:55:94> 'long' 4
|
mixed_tentusscher_myo_epi_2004_S1_10.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S1_10.h"
// Report the cell model's metadata to the solver framework: the resting
// membrane potential and the size of the ODE state vector. Each field is
// written only when the corresponding query flag is set by the caller.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v) {
        cell_model->initial_v = INITIAL_V;
    }
    if(get_neq) {
        cell_model->number_of_ode_equations = NEQ;
    }
}
// Load the initial state vector for one cell, choosing between the
// myocardium and epicardium steady-state parameter sets according to the
// mask in extra_data (0 = myocardium, anything else = epicardium).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
// NOTE(review): first_call is an unsynchronized static; assumes this
// function is first invoked from a single thread -- confirm with caller.
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array (per-cell tissue mask supplied via extra_data)
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
// presumably aborts the program; mapping is not checked again below
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
// (same variable ordering as the commented defaults above)
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
// (same variable ordering as the commented defaults above)
real sv_sst[]={-86.6838324354870,0.00125861532112849,0.782531022046086,0.782339747189463,0.000171890042887048,0.486292974625246,0.00291620263464322,0.999998385881296,1.89662206007922e-08,1.86215027927160e-05,0.999772605771797,1.00726021297708,0.999997303478017,4.12841986112007e-05,0.532834056372551,10.1676164688339,139.406744560708}; for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
// Advance all requested cells by num_steps ODE steps of size dt, using
// the myocardium or epicardium model per cell according to the mask in
// extra_data (0 = myocardium, otherwise epicardium). Cells are solved in
// parallel with OpenMP; each iteration touches only its own state slice.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array (per-cell tissue mask supplied via extra_data)
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }
    uint32_t sv_id;
    int i;
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        // sv_id is the global cell id; it differs from the loop counter i
        // whenever cells_to_solve remaps the iteration order.
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;
        for (int j = 0; j < num_steps; ++j)
        {
            // BUG FIX: index the mask by the cell id (sv_id), not by the
            // loop counter i — the state slice and the initial-condition
            // code both use sv_id, so indexing by i applied the wrong
            // model whenever cells_to_solve was non-identity.
            if (mapping[sv_id] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// Advance one myocardium cell by a single time step of size dt.
// The state vector sv (NEQ entries) is copied out, handed to
// RHS_cpu_myo, and the result written back. Note: the caller stores
// next[] directly into sv, so RHS_cpu_myo is expected to return the
// already-updated state (not a raw derivative) in its second argument.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real state[NEQ], next[NEQ];
    int k;
    // Snapshot the current state so RHS_cpu_myo reads a stable copy.
    for(k = 0; k < NEQ; k++)
        state[k] = sv[k];
    RHS_cpu_myo(state, next, stim_current, dt);
    // Commit the updated state.
    for(k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
/* Single explicit time step of a ten Tusscher-style human ventricular
 * myocyte model, myocardium parameter set (17 state variables).
 * NOTE(review): exact model variant (TT2004 vs TT2006) not confirmable
 * from this file alone — verify against the model source.
 *
 * Despite the name, rDY_ does NOT receive time derivatives: it receives
 * the UPDATED state after one step of size dt (Rush-Larsen for the
 * gates, forward Euler for voltage and ionic concentrations).
 *
 * sv            input state vector (read-only), layout as unpacked below
 * rDY_          output: advanced state, same layout as sv
 * stim_current  externally applied stimulus added to the total current
 * dt            step size
 */
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];    // membrane voltage
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];
    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;
    // Scratch variables: membrane currents, fluxes, equilibrium
    // potentials, and gate steady states / time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;
    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    // Exponential relaxation factors for the fCa and g gates (which use
    // fixed time constants taufca/taug rather than voltage-dependent ones).
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;
    //Needed to compute currents: Nernst/reversal potentials and
    //rectification factors.
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;
    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // New free Ca is obtained analytically from the quadratic implied by
    // the single-buffer rapid-buffering approximation (for both SR and
    // cytosolic compartments).
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    // Forward-Euler update of Na+ and K+ concentrations.
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));
    //Update gates using the Rush-Larsen scheme: exact exponential
    //relaxation toward the steady state over one step dt.
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g are only allowed to decrease while the cell is
    // depolarized above -37 mV (non-increase clamp).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;
    //update voltage (forward Euler on the total membrane current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
/* Advance one epicardium cell by a single step of size dt.
 * sv holds the NEQ state variables and is updated in place; the RHS
 * routine returns the already-advanced state, which is copied back. */
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real state[NEQ];
    real updated[NEQ];
    int eq;

    /* Snapshot the current state so the RHS sees consistent inputs. */
    for (eq = 0; eq < NEQ; ++eq)
        state[eq] = sv[eq];

    RHS_cpu_epi(state, updated, stim_current, dt);

    /* Commit the advanced state. */
    for (eq = 0; eq < NEQ; ++eq)
        sv[eq] = updated[eq];
}
/* Single explicit time step of a ten Tusscher-style human ventricular
 * myocyte model, epicardium parameter set (17 state variables).
 * Unlike the myocardium variant, several conductances and the SR
 * release/leak parameters are overridden below from a fitted
 * `parameters[]` table.
 *
 * Despite the name, rDY_ does NOT receive time derivatives: it receives
 * the UPDATED state after one step of size dt (Rush-Larsen for the
 * gates, forward Euler for voltage and ionic concentrations).
 *
 * sv            input state vector (read-only), layout as unpacked below
 * rDY_          output: advanced state, same layout as sv
 * stim_current  externally applied stimulus added to the total current
 * dt            step size
 */
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];    // membrane voltage
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];
    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;
    // Fitted parameter set: overrides the default conductances above and
    // supplies the SR release (arel/crel) and leak (Vleak) coefficients.
    // NOTE(review): provenance of these fitted values is not visible
    // here — confirm against the calibration source.
    real parameters []={14.3749143448083,0.000106604137464535,0.000163318890080367,0.000429744441186708,0.266262734691489,0.186959465205765,0.123266589202760,3.29275100569290,0.0156860789687658,1.60533641843158,1092.89191410508,0.000432813436608356,0.580002884264747,0.0196175412254594,0.00284974997889632,3.23076589501517e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];
    // Scratch variables: membrane currents, fluxes, equilibrium
    // potentials, and gate steady states / time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;
    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    // Exponential relaxation factors for the fCa and g gates (which use
    // fixed time constants taufca/taug rather than voltage-dependent ones).
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;
    //Needed to compute currents: Nernst/reversal potentials and
    //rectification factors.
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;
    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // SR release and leak use the fitted arel/crel/Vleak instead of the
    // hard-coded constants of the myocardium variant.
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // New free Ca is obtained analytically from the quadratic implied by
    // the single-buffer rapid-buffering approximation (for both SR and
    // cytosolic compartments).
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    // Forward-Euler update of Na+ and K+ concentrations.
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));
    //Update gates using the Rush-Larsen scheme: exact exponential
    //relaxation toward the steady state over one step dt.
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g are only allowed to decrease while the cell is
    // depolarized above -37 mV (non-increase clamp).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;
    //update voltage (forward Euler on the total membrane current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
fdtd2d.c | /**
* fdtd2d.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
#define tmax 500
#define NX SIZE
#define NY SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Initialize the source term and field arrays with deterministic patterns.
 *
 * runFdtd() indexes ex with row stride (NY + 1) and reads row NX of ey,
 * matching the allocations in main (ex: NX x (NY + 1), ey: (NX + 1) x NY).
 * The original initializer used row stride NY for both arrays and only NX
 * rows of ey, leaving part of each buffer untouched — the inconsistency
 * main's own comment works around with memset.  Initialize the full
 * allocated extents with consistent indexing instead. */
void init_arrays(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey,
                 DATA_TYPE *hz) {
  int i, j;
  for (i = 0; i < tmax; i++) {
    _fict_[i] = (DATA_TYPE)i;
  }
  for (i = 0; i < NX; i++) {
    for (j = 0; j < NY + 1; j++) {
      ex[i * (NY + 1) + j] = ((DATA_TYPE)i * (j + 1) + 1) / NX;
    }
  }
  for (i = 0; i < NX + 1; i++) {
    for (j = 0; j < NY; j++) {
      ey[i * NY + j] = ((DATA_TYPE)(i - 1) * (j + 2) + 2) / NX;
    }
  }
  for (i = 0; i < NX; i++) {
    for (j = 0; j < NY; j++) {
      hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX;
    }
  }
}
/* Fill hz with the same deterministic pattern used by init_arrays. */
void init_array_hz(DATA_TYPE *hz) {
  for (int r = 0; r < NX; r++) {
    for (int c = 0; c < NY; c++) {
      hz[r * NY + c] = ((DATA_TYPE)(r - 9) * (c + 4) + 3) / NX;
    }
  }
}
/* Count the grid cells whose hz values differ by more than
 * ERROR_THRESHOLD percent between the two result arrays.
 * Returns the number of mismatching cells (0 == results agree). */
int compareResults(DATA_TYPE *hz1, DATA_TYPE *hz2) {
  int mismatches = 0;
  for (int row = 0; row < NX; row++) {
    for (int col = 0; col < NY; col++) {
      int idx = row * NY + col;
      if (percentDiff(hz1[idx], hz2[idx]) > ERROR_THRESHOLD) {
        mismatches++;
      }
    }
  }
  return mismatches;
}
/* Reference (sequential) 2-D FDTD kernel: tmax time steps over an
 * NX x NY grid.  ex uses row stride (NY + 1) and ey is read up to row
 * NX, matching the NX*(NY+1) / (NX+1)*NY allocations in main. */
void runFdtd(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) {
  int t, i, j;
  for (t = 0; t < tmax; t++) {
    // Drive the first ey row with the time-dependent source term.
    for (j = 0; j < NY; j++) {
      ey[0 * NY + j] = _fict_[t];
    }
    // Update ey from the hz difference along i (rows 1..NX-1).
    for (i = 1; i < NX; i++) {
      for (j = 0; j < NY; j++) {
        ey[i * NY + j] =
            ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
      }
    }
    // Update ex from the hz difference along j (columns 1..NY-1);
    // note the (NY + 1) row stride.
    for (i = 0; i < NX; i++) {
      for (j = 1; j < NY; j++) {
        ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] -
                               0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
      }
    }
    // Update hz from the discrete curl of (ex, ey); reads ey row i + 1
    // and ex column j + 1.
    for (i = 0; i < NX; i++) {
      for (j = 0; j < NY; j++) {
        hz[i * NY + j] =
            hz[i * NY + j] -
            0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
                   ey[(i + 1) * NY + j] - ey[i * NY + j]);
      }
    }
  }
}
/* OpenMP 4.x offloaded variant of runFdtd.  All arrays are mapped onto
 * the target device once for the whole time loop; only hz is copied
 * back (tofrom), since it is the array compared against the sequential
 * result.  The four sweeps inside one time step are each offloaded as a
 * separate target region; the time loop itself stays on the host
 * because each sweep depends on the previous one. */
void runFdtd_OMP(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey,
                 DATA_TYPE *hz) {
  int t, i, j;
#pragma omp target data map(to : _fict_[ : tmax], ex[ : (NX *(NY + 1))], ey[ : ((NX + 1) * NY)]) map(tofrom : hz[ : NX *NY]) device(OMP_DEVICE_ID)
  {
    for (t = 0; t < tmax; t++) {
      // Source-row update (1-D, no collapse needed).
#pragma omp target teams distribute parallel for device(OMP_DEVICE_ID)
      for (j = 0; j < NY; j++) {
        ey[0 * NY + j] = _fict_[t];
      }
      // ey sweep; collapse(2) flattens the i/j nest for the device.
#pragma omp target teams distribute parallel for collapse(2) device(OMP_DEVICE_ID)
      for (i = 1; i < NX; i++) {
        for (j = 0; j < NY; j++) {
          ey[i * NY + j] =
              ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
        }
      }
      // ex sweep; note the (NY + 1) row stride, matching the mapping above.
#pragma omp target teams distribute parallel for collapse(2) device(OMP_DEVICE_ID)
      for (i = 0; i < NX; i++) {
        for (j = 1; j < NY; j++) {
          ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] -
                                 0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
        }
      }
      // hz sweep from the discrete curl of (ex, ey).
#pragma omp target teams distribute parallel for collapse(2) device(OMP_DEVICE_ID)
      for (i = 0; i < NX; i++) {
        for (j = 0; j < NY; j++) {
          hz[i * NY + j] =
              hz[i * NY + j] -
              0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
                     ey[(i + 1) * NY + j] - ey[i * NY + j]);
        }
      }
    }
  }
}
/* Driver: runs the OMP and/or sequential FDTD variants depending on the
 * compile-time flags, optionally compares their hz outputs, and returns
 * the number of mismatching cells (0 on success).
 *
 * Fix vs. original: under RUN_TEST, compareResults() was called even
 * when only one of RUN_OMP_* / RUN_CPU_SEQ was compiled in, so one of
 * the pointers was still NULL and was dereferenced.  The comparison is
 * now guarded.  Allocation results are also checked. */
int main() {
  fprintf(stdout, "<< 2-D Finite Different Time Domain Kernel >>\n");
  // declare arrays and allocate memory
  DATA_TYPE *_fict_ = (DATA_TYPE *)calloc(tmax, sizeof(DATA_TYPE));
  DATA_TYPE *ex = (DATA_TYPE *)calloc(NX * (NY + 1), sizeof(DATA_TYPE));
  DATA_TYPE *ey = (DATA_TYPE *)calloc((NX + 1) * NY, sizeof(DATA_TYPE));
  DATA_TYPE *hz = NULL;
  DATA_TYPE *hz_outputFromGpu = NULL;
  if (_fict_ == NULL || ex == NULL || ey == NULL) {
    fprintf(stderr, "allocation failure\n");
    return 1;
  }
  // run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
  // allocate
  hz_outputFromGpu = (DATA_TYPE *) malloc(NX * NY * sizeof(DATA_TYPE));
  if (hz_outputFromGpu == NULL) {
    fprintf(stderr, "allocation failure\n");
    return 1;
  }
  // init arrays
  init_arrays(_fict_, ex, ey, hz_outputFromGpu);
  // benchmark
  BENCHMARK_OMP(runFdtd_OMP(_fict_, ex, ey, hz_outputFromGpu));
  // prevent dead-code elimination
  DCE_PREVENT(hz_outputFromGpu, NX*NY);
#endif
  // run sequential version if enabled
#ifdef RUN_CPU_SEQ
  // reset memory for common/shared arrays so both variants start from
  // identical initial conditions
  memset(_fict_, 0, tmax * sizeof(DATA_TYPE));
  memset(ex, 0, NX * (NY + 1) * sizeof(DATA_TYPE));
  memset(ey, 0, (NX + 1) * NY * sizeof(DATA_TYPE));
  // allocate
  hz = (DATA_TYPE *) malloc(NX * NY * sizeof(DATA_TYPE));
  if (hz == NULL) {
    fprintf(stderr, "allocation failure\n");
    return 1;
  }
  // init arrays
  init_arrays(_fict_, ex, ey, hz);
  // benchmark
  BENCHMARK_CPU(runFdtd(_fict_, ex, ey, hz));
  // prevent dead-code elimination
  DCE_PREVENT(hz, NX*NY);
#endif
  // if TEST is enabled, then compare OMP results against sequential mode
  int fail = 0;
#ifdef RUN_TEST
  // Comparing requires BOTH variants to have produced output; otherwise
  // the pointers are still NULL and compareResults would crash.
  if (hz != NULL && hz_outputFromGpu != NULL) {
    fail = compareResults(hz, hz_outputFromGpu);
    printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
  } else {
    fprintf(stderr,
            "RUN_TEST skipped: enable both an OMP variant and RUN_CPU_SEQ\n");
  }
#endif
  // release memory (free(NULL) is a no-op)
  free(_fict_);
  free(ex);
  free(ey);
  free(hz);
  free(hz_outputFromGpu);
  return fail;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two timevals.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place as a side effect (classic glibc-manual
 * idiom) — callers must not rely on y being unchanged. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry excess microseconds out of y in the opposite direction. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the component-wise difference is valid and
     * tv_usec is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative overall difference iff x's seconds fall below y's. */
    return x->tv_sec < y->tv_sec;
}
/* Driver for the time-tiled order-1 3D 7-point stencil.
 *
 * Usage: prog NX NY NZ [NT]  (interior sizes; 2 halo points are added).
 *
 * Fix vs. original: Nx/Ny/Nz/Nt were read uninitialized (undefined
 * behavior) whenever too few arguments were given — they now carry
 * documented defaults.  The unused local `t` was removed. */
int main(int argc, char *argv[])
{
  int i, j, k, test;
  /* Problem dimensions including the 2 halo points, and time steps.
   * Defaults apply when not supplied on the command line. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Double-buffered grid: A[t%2] is read, A[(t+1)%2] is written. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 24;
  tile_size[3] = 64;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  //
  // NOTE(review): only indices >= 1 of A[0] are seeded and A[1] is never
  // seeded, so halo/boundary cells hold indeterminate malloc contents
  // when first read by the stencil — confirm intended benchmark behavior.
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    // Diamond/time-tiled loop nest generated by CLooG/PLUTO — the tile
    // bounds below must not be hand-edited.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,12);t1++) {
    lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
    ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) {
        for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(24*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(24*t3+Nx+20,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) {
          for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),64*t4+62),24*t1-24*t2+Nz+21);t5++) {
            for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                lbv=max(64*t4,t5+1);
                ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
|
ProgressBar.h | /**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "saiga/config.h"
#include "saiga/core/math/imath.h"
#include "saiga/core/time/all.h"
#include "saiga/core/util/Thread/SpinLock.h"
#include "saiga/core/util/Thread/threadName.h"
#include "saiga/core/util/assert.h"
#include "saiga/core/util/tostring.h"
#include <atomic>
#include <iostream>
#include <mutex>
#include <string>
#include <condition_variable>
namespace Saiga
{
/**
* A synchronized progress bar for console output.
* You must not write to the given stream while the progress bar is active.
*
* Usage Parallel Image loading:
*
* ProgressBar loadingBar(std::cout, "Loading " + to_string(N) + " images ", N);
* #pragma omp parallel for
* for (int i = 0; i < N; ++i)
* {
* images[i].load("...");
* loadingBar.addProgress(1);
* }
*
*/
struct ProgressBar
{
    // strm:   target stream (caller must not write to it while the bar runs)
    // header: text printed in front of the bar
    // end:    total number of work items (0 disables the redraw thread)
    // length: bar width in '#' characters
    // update_time_ms: redraw interval of the background thread
    // element_name:   unit label used in the "<rate> e/s" display
    ProgressBar(std::ostream& strm, const std::string header, int end, int length = 30,
                bool show_remaining_time = false, int update_time_ms = 100, std::string element_name = "e")
        : strm(strm),
          prefix(header),
          end(end),
          length(length),
          show_remaining_time(show_remaining_time),
          update_time_ms(update_time_ms),
          element_name(element_name)
    {
        SAIGA_ASSERT(end >= 0);
        print();
        // Only spawn the redraw thread when there is actual work to track.
        if (end > 0)
        {
            run();
        }
        timer.start();
    }
    // Stops the redraw thread and prints the final state.
    ~ProgressBar() { Quit(); }
    // Thread-safe progress increment (atomic add, no lock required).
    void addProgress(int i) { current += i; }
    // Replaces the free-form text shown after the bar (lock-protected).
    void SetPostfix(const std::string& str)
    {
        std::unique_lock l(lock);
        postfix = str;
    }
    // Signals the redraw thread to exit and joins it. Safe to call more
    // than once (the destructor calls it too).
    void Quit()
    {
        running = false;
        cv.notify_one();
        if (st.joinable())
        {
            st.join();
        }
    }
   private:
    TimerBase timer;          // measures elapsed time since construction
    std::ostream& strm;
    ScopedThread st;          // background redraw thread
    std::string prefix;
    std::string postfix;
    std::atomic_bool running = true;
    std::atomic_int current = 0;  // items completed so far
    std::mutex lock;              // guards postfix
    std::condition_variable cv;   // wakes the redraw thread early on Quit()
    int end;
    int length;
    bool show_remaining_time;
    int update_time_ms;
    std::string element_name;
    // Starts the background thread that redraws the bar every
    // update_time_ms (or immediately when Quit() notifies cv) until the
    // work is done, then prints a final line.
    void run()
    {
        st = ScopedThread(
            [this]()
            {
                while (running && current.load() < end)
                {
                    print();
                    std::unique_lock<std::mutex> l(lock);
                    cv.wait_for(l, std::chrono::milliseconds(update_time_ms));
                }
                print();
                strm << std::endl;
            });
    }
    // Renders one "\r"-anchored line: percent, bar, count, time and rate.
    // Stream format flags are saved and restored around the call.
    // NOTE(review): uses std::setw/std::setprecision but <iomanip> is not
    // included here — presumably pulled in transitively; confirm.
    void print()
    {
        auto f = strm.flags();
        //        SAIGA_ASSERT(current <= end);
        double progress = end == 0 ? 0 : double(current) / end;
        auto time = timer.stop();
        int progress_pro = iRound(progress * 100);
        int barLength = progress * length;
        strm << "\r" << prefix << " ";
        strm << std::setw(3) << progress_pro << "%";
        {
            // bar
            strm << " |";
            for (auto i = 0; i < barLength; ++i)
            {
                strm << "#";
            }
            for (auto i = barLength; i < length; ++i)
            {
                strm << " ";
            }
            strm << "| ";
        }
        {
            // element count
            auto end_str = to_string(end);
            strm << std::setw(end_str.size()) << current << "/" << end << " ";
        }
        {
            // Time
            // NOTE(review): when progress == 0 the remaining-time estimate
            // divides by zero (infinite/undefined duration) — confirm intended.
            strm << "[" << DurationToString(time);
            if (show_remaining_time)
            {
                auto remaining_time = time * (1 / progress) - time;
                strm << "<" << DurationToString(remaining_time);
            }
            strm << "] ";
        }
        {
            // performance stats
            double s = std::chrono::duration_cast<std::chrono::duration<double>>(time).count();
            double ele_per_second = current / s;
            strm << "[" << std::setprecision(2) << std::fixed << ele_per_second << " " << element_name << "/s]";
        }
        {
            std::unique_lock l(lock);
            strm << " " << postfix;
        }
        strm << std::flush;
        strm << std::setprecision(6);
        strm.flags(f);
    }
};
} // namespace Saiga
|
consecutive_write_memcpy.c | #include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "constants.h"
/**
* Deinterleave (transpose) an IQUV ring buffer page to the ordering needed for FITS files
* Note that this is probably a slow function, and is not meant to be run real-time
*
* data in: tab, channel/4, time/500 packets of time,channel,pn
* data out: tab, channel, pol, time
*
* Suggested use is:
* 1. realtime: ringbuffer -> [trigger] -> dada_dbdisk
* 2. offline: dada_dbdisk -> ringbuffer -> dadafits
*
* @param {const char *} page Ringbuffer page with interleaved data
* @param {const char *} transposed
* @param {int} ntabs Number of tabs
* @param {int} nchannels Number of channels
* @param {int} npackets Number of packets per sequence
*/
/* Deinterleave (transpose) an IQUV ring buffer page into FITS ordering.
 *
 * data in:  tab, channel/4, time/500 packets of (time, channel, pol)
 * data out: tab, channel, pol, time
 *
 * page        interleaved input page
 * transposed  output buffer, same total size as the input
 * ntabs       number of tied-array beams
 * nchannels   number of channels (stepped in groups of 4)
 * npackets    number of packets per sequence
 *
 * NOTE(review): the inner loop over pc assumes 16 = 4 channels * NPOLS
 * (pol, channel) lanes per channel group and that the packet lane
 * stride is NPOLS*NCHANS — confirm against constants.h.
 *
 * Fix vs. original: removed the unused local `packet`. */
void deinterleave (const unsigned char *page, unsigned char *transposed, const int ntabs, const int nchannels, const int npackets) {
  int tab = 0;
  for (tab = 0; tab < ntabs; tab++) {
    int channel_offset = 0;
    for (channel_offset = 0; channel_offset < nchannels; channel_offset+=4) {
      // Start of this (tab, channel-group) slice in both buffers.
      const unsigned char *src = &page[(tab * nchannels + channel_offset)*NPOLS*npackets*NSAMPS];
      unsigned char *dest = &transposed[(tab * nchannels + channel_offset)*NPOLS*npackets*NSAMPS];
      int pc = 0;
#pragma omp parallel for
      for (pc=0; pc < 16; pc+=4) {
        // Gather 4 adjacent (pol, channel) lanes into contiguous
        // per-lane staging buffers, then write them out in one memcpy
        // so the output side sees only sequential writes.
        unsigned char temp[npackets*NSAMPS*4];
        unsigned char *tempA = &temp[0 * npackets*NSAMPS];
        unsigned char *tempB = &temp[1 * npackets*NSAMPS];
        unsigned char *tempC = &temp[2 * npackets*NSAMPS];
        unsigned char *tempD = &temp[3 * npackets*NSAMPS];
        int tn = 0;
        for (tn = 0; tn < npackets*NSAMPS; tn++) {
          tempA[tn] = src[tn*NPOLS*NCHANS + pc + 0];
          tempB[tn] = src[tn*NPOLS*NCHANS + pc + 1];
          tempC[tn] = src[tn*NPOLS*NCHANS + pc + 2];
          tempD[tn] = src[tn*NPOLS*NCHANS + pc + 3];
        }
        memcpy(&dest[pc*npackets*NSAMPS], temp, 4*npackets*NSAMPS);
      }
    } // channel_offset
  } // tab
}
|
MatrixOpenMP.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/**
 * Allocate an N x N matrix with every element initialized to value.
 *
 * Returns NULL when any allocation fails (rows allocated so far are
 * released, so nothing leaks). The caller owns the result and must
 * release it with FreeMatrix(M, N).
 */
double **AllocateValueMatrix(unsigned int N, double value) {
    double **M = malloc(N * sizeof *M);   /* no cast needed in C */
    if (M == NULL) {
        return NULL;
    }
    for (unsigned int i = 0; i < N; ++i) {
        M[i] = malloc(N * sizeof *M[i]);
        if (M[i] == NULL) {
            /* unwind previously allocated rows before giving up */
            while (i > 0) {
                free(M[--i]);
            }
            free(M);
            return NULL;
        }
        for (unsigned int j = 0; j < N; ++j) {
            M[i][j] = value;
        }
    }
    return M;
}
/**
 * Allocate an N x N matrix filled with values from rand().
 * Seed with srand() beforehand for reproducible contents.
 *
 * Returns NULL when any allocation fails (partial rows are released).
 * The caller owns the result and must release it with FreeMatrix(M, N).
 */
double **AllocateRandomMatrix(unsigned int N) {
    double **M = malloc(N * sizeof *M);   /* no cast needed in C */
    if (M == NULL) {
        return NULL;
    }
    for (unsigned int i = 0; i < N; ++i) {
        M[i] = malloc(N * sizeof *M[i]);
        if (M[i] == NULL) {
            /* unwind previously allocated rows before giving up */
            while (i > 0) {
                free(M[--i]);
            }
            free(M);
            return NULL;
        }
        for (unsigned int j = 0; j < N; ++j) {
            M[i][j] = rand();
        }
    }
    return M;
}
/* Release an N x N matrix built by one of the Allocate*Matrix helpers:
 * first each row, then the array of row pointers itself. */
void FreeMatrix(double **M, unsigned int N) {
    unsigned int row = 0;
    while (row < N) {
        free(M[row]);
        ++row;
    }
    free(M);
}
/* Print an N x N matrix to stdout: one row per line, each element
 * rendered with "%f" and followed by a space. */
void PrintMatrix(double **M, unsigned int N) {
    unsigned int r, c;
    for (r = 0; r < N; ++r) {
        for (c = 0; c < N; ++c) {
            printf("%f ", M[r][c]);
        }
        printf("\n");
    }
}
/**
 * Benchmark: OpenMP-parallel multiplication of two random N x N matrices.
 *
 * Usage: prog [threads] [N]
 * Non-numeric or non-positive arguments now fall back to the defaults
 * (8 threads, N = 1024) instead of being cast to huge unsigned values.
 */
int main(int argc, char *argv[]) {
    unsigned int threads = 8u;
    unsigned int N = 1024u;
    if (argc > 1) {
        int t = atoi(argv[1]);
        if (t > 0) {   /* atoi returns 0 on garbage; 0/negative are invalid */
            threads = (unsigned int) t;
        }
    }
    if (argc > 2) {
        int size = atoi(argv[2]);
        if (size > 0) {
            N = (unsigned int) size;
        }
    }
    double **A = AllocateRandomMatrix(N);
    double **B = AllocateRandomMatrix(N);
    double **C = AllocateValueMatrix(N, 0.0);
    omp_set_num_threads((int) threads);
    /* Rows of C are distributed over threads; no element is written by
     * more than one thread, so no synchronization is needed. */
    #pragma omp parallel for
    for (unsigned int i = 0u; i < N; ++i) {
        for (unsigned int j = 0u; j < N; ++j) {
            for (unsigned int k = 0u; k < N; ++k) {
                C[i][j] += A[i][k] * B[k][j];
            }
        }
    }
    /* %u matches the unsigned arguments (the original used %d) */
    printf("Threads\t%u\n", threads);
    printf("Size\t%ux%u\n", N, N);
    //printf("Result\n");
    //PrintMatrix(C, N);
    FreeMatrix(A, N);
    FreeMatrix(B, N);
    FreeMatrix(C, N);
    return EXIT_SUCCESS;
}
|
parallel_dgemm.c | /* Copyright 2019 Los Alamos National Laboratory
* Copyright 2009-2018 Purdue University and Purdue University Research Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*******************************************************************************
* This example demonstrates threading impact on computing real matrix product
* C=alpha*A*B+beta*C using Intel(R) MKL subroutine DGEMM, where A, B, and C
* are matrices and alpha and beta are double precision scalars.
*
* In this simple example, practices such as memory management, data alignment,
* and I/O that are necessary for good programming style and high Intel(R) MKL
* performance are omitted to improve readability.
********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <mkl.h>
#include <omp.h>
#include <string.h>
#include <sys/time.h>
/* Wall-clock time in seconds (microsecond resolution) via gettimeofday. */
double get_cur_time(){
    struct timeval now;
    struct timezone tz;
    gettimeofday(&now, &tz);
    return (double)now.tv_sec + (double)now.tv_usec / 1000000.0;
}
/* Consider adjusting LOOP_COUNT based on the performance of your computer */
/* to make sure that total run time is at least 1 second */
#define LOOP_COUNT 50
/*
 * Benchmark single-threaded MKL DGEMM from several concurrent OpenMP threads.
 * Options: -size N, -cnt LOOPS, -thread T (T is capped at 128, the capacity
 * of the per-thread time[] array).
 */
int main(int argc, char** argv)
{
    double time[128];
    int N = 256;
    int loop_cnt = LOOP_COUNT;  /* was read uninitialized when -cnt was absent */
    int nb_threads = 1;
    //parse command line
    for (int k = 1; k < argc; k++) {
        if (!strcmp(argv[k], "-size")) {
            N = atoi(argv[++k]);
        }
        if (!strcmp(argv[k], "-cnt")) {
            /* honored now: the original reset loop_cnt to LOOP_COUNT inside
             * the parallel region (a racy shared write that also silently
             * ignored this option) */
            loop_cnt = atoi(argv[++k]);
        }
        if (!strcmp(argv[k], "-thread")) {
            nb_threads = atoi(argv[++k]);
        }
    }
    if (N < 1) N = 256;
    if (loop_cnt < 1) loop_cnt = LOOP_COUNT;
    if (nb_threads < 1) nb_threads = 1;
    if (nb_threads > 128) nb_threads = 128;  /* time[] has only 128 slots */
    omp_set_dynamic(0);
    omp_set_num_threads(nb_threads);
    #pragma omp parallel
    {
        int myid = omp_get_thread_num();
        double *A = NULL;
        double *B = NULL;
        double *C = NULL;
        int i, r;
        double alpha = 1.0, beta = 1.0;
        double s_initial, s_elapsed;
        time[myid] = 0.0;  /* defined value even if this thread cannot allocate */
        A = (double *)mkl_malloc( N*N*sizeof( double ), 64 );
        B = (double *)mkl_malloc( N*N*sizeof( double ), 64 );
        C = (double *)mkl_malloc( N*N*sizeof( double ), 64 );
        if (A == NULL || B == NULL || C == NULL) {
            printf( "\n ERROR: Can't allocate memory for matrices. Aborting... \n\n");
            /* skip the benchmark instead of dereferencing NULL below
             * (the original fell through with the return commented out) */
        } else {
            for (i = 0; i < (N * N); i++) {
                A[i] = (double)(i+1);
                B[i] = (double)(-i-1);
                C[i] = 0.0;
            }
            mkl_set_num_threads(1);  /* each OpenMP thread runs a sequential DGEMM */
            // warmup
            for (r = 0; r < 10; r++) {
                cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
                            N, N, N, alpha, A, N, B, N, beta, C, N);
            }
            s_initial = get_cur_time();
            for (r = 0; r < loop_cnt; r++) {
                cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
                            N, N, N, alpha, A, N, B, N, beta, C, N);
            }
            s_elapsed = (get_cur_time() - s_initial) / loop_cnt;
            time[myid] = s_elapsed * 1000;
            printf ("Thread #%d, MKL DGEMM N=%d, time %.5f milliseconds, GFLOPS=%.3f\n",
                    myid, N, (s_elapsed * 1000), 2*(double)N*(double)N*(double)N/s_elapsed*1e-9);
            if (s_elapsed < 0.9/loop_cnt) {
                /* recommend a LOOP_COUNT giving ~1 s total run time */
                i = (int)(1.0/s_elapsed) + 1;
                if (myid == 0)
                    printf(" It is highly recommended to define LOOP_COUNT for this example on your \n"
                           " computer as %i to have total execution time about 1 second for reliability \n"
                           " of measurements\n\n", i);
            }
        }
        if (A != NULL) mkl_free(A);
        if (B != NULL) mkl_free(B);
        if (C != NULL) mkl_free(C);
    }
    // compute average per-thread time
    double average = 0.0;
    for (int i = 0; i < nb_threads; i++)
        average += time[i];
    average = average / nb_threads;
    printf(" AE= %.3f ms, Each thread uses %.3f MB\n",
           average, (double)N*(double)N*sizeof(double)/(1024*1024));
    printf (" Example completed. \n\n");
    return 0;
}
|
omp4_demo1.c | /*
Copyright since 2016 the OMPi Team
Dept. of Computer Science & Engineering, University of Ioannina
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* omp4_demo1.c
* ------------
* Simple demonstration of the Zynq and Epiphany as OpenMP4.0 devices.
*/
#include <omp.h>
#include <stdio.h>
/* Query every OpenMP device and report whether a tiny kernel mapped to it
 * executes on the host (reported as "zynq") or on an offload target
 * (reported as "epiphany"). */
void demo_devices()
{
	int i, isinitial;
	/* Diagnostics */
	printf("Available devices: %d\n", omp_get_num_devices());
	printf("Default device: %d\n", omp_get_default_device());
	for (i = 0; i < omp_get_num_devices(); i++)
	{
		printf("Kernel check @ device %d ", i);
		/* Offload to device i; map(from:) copies isinitial back to the
		 * host after the target region completes. */
		#pragma omp target device(i) map(from:isinitial)
		{
			isinitial = omp_is_initial_device();
		}
		printf(isinitial ? "(zynq)\n" : "(epiphany)\n");
	}
}
/* Entry point: run the device demonstration and exit successfully. */
int main(void)
{
	demo_devices();
	return 0;
}
|
DRB037-truedepseconddimension-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized in this program.
The inner loop has true dependence.
Data race pair: b[i][j]@63:7 vs. b[i][j-1]@63:15
*/
#include <stdlib.h>
#include <stdio.h>
double b[1000][1000];
/*
 * DataRaceBench kernel: the data race here is INTENTIONAL (this is the
 * "-yes" variant). Only the outer i loop would be safe to parallelize;
 * instead the inner j loop is parallelized although b[i][j] depends on
 * b[i][j-1] (a loop-carried true dependence on the second dimension), so
 * threads race on neighbouring elements. Do not "fix" this: the benchmark
 * exists to exercise race detectors.
 */
int main(int argc, char* argv[])
{
  int i,j;
  int n=1000, m=1000;
  for (i=0;i<n;i++)
    /* racy on purpose: the j iterations are NOT independent */
#pragma omp parallel for
    for (j=1;j<m;j++)
      b[i][j]=b[i][j-1];
  printf("b[500][500]=%f\n", b[500][500]);
  return 0;
}
|
GB_unop__identity_fc64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc64_int64
// op(A') function: GB_unop_tran__identity_fc64_int64
// C type: GxB_FC64_t
// A type: int64_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator elementwise: Cx [p] = (GxB_FC64_t) Ax [p],
// casting each int64 entry to a double complex with zero imaginary part.
// This file is auto-generated (see header): do not hand-edit the logic.
GrB_Info GB_unop_apply__identity_fc64_int64
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const int64_t *Ax,      // input array of anz entries
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // entries are independent, so a static schedule divides them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;
        GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, cast each entry from int64 to GxB_FC64_t, and apply the
// identity operator. The real work lives in the shared template
// GB_unop_transpose.c, instantiated via the GB_* macros defined above.
// This file is auto-generated (see header): do not hand-edit the logic.
GrB_Info GB_unop_tran__identity_fc64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // NOTE(review): presumably per-slice
                                        // row counts from phase 1 -- confirm
                                        // against GB_unop_transpose.c
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
calcBox.h | #pragma once
#include <algorithm>
#include <future>
#include <numeric>
#include <thread>
#include <tuple>
#include "box.h"
#include "simd_double2.h"
#include "simd_double4.h"
#include "allocator.h"
using namespace std; //sorry for this "using" in global namespace
// The calcBox variants in this header all compute the bounding interval
// [min, max] of a vector<double>. Every variant reads v.front()
// unconditionally, so all of them require a non-empty input. Each namespace
// name describes the coding style under benchmark, so the code SHAPE of
// each variant is intentional.
// Baseline: two separate full passes, one for min and one for max.
namespace for_for
{
    double min(const vector<double>& v)
    {
        double mi = v.front();
        for (auto d : v)
            if (d < mi) mi = d;
        return mi;
    }
    double max(const vector<double>& v)
    {
        double ma = v.front();
        for (auto d : v)
            if (d > ma) ma = d;
        return ma;
    }
    Box<double> calcBox(const vector<double>& v)
    {
        return make_box(min(v), max(v));
    }
}
// Single indexed pass using std::min / std::max.
namespace for_min_max
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            mi = min(mi, d);
            ma = max(ma, d);
        }
        return make_box(mi, ma);
    }
}
// As for_min_max, but with hand-written inline helpers instead of std::.
namespace for_own_min_max
{
    inline double myMin(double left, double right) { return right < left ? right : left; }
    inline double myMax(double left, double right) { return left < right ? right : left; }
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            mi = myMin(mi, d);
            ma = myMax(ma, d);
        }
        return make_box(mi, ma);
    }
}
// Indexed loop with "<" termination and two independent ifs.
namespace for_lower_if_if
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0; i < v.size(); ++i) {
            const auto& d = v[i];
            if (d < mi) mi = d;
            if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
// Indexed loop with "<" termination and if / else-if.
namespace for_lower_if_else
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0; i < v.size(); ++i) {
            const auto& d = v[i];
            if (d < mi) mi = d;
            else if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
// Indexed loop with "!=" termination and two independent ifs.
namespace for_if_if
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            if (d < mi) mi = d;
            if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
// Indexed loop with "!=" termination and if / else-if.
namespace for_if_else
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            if (d < mi) mi = d;
            else if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
// Early "continue" when d is already inside the current box.
namespace for_if_continue
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            if (mi <= d && d <= ma) continue;
            mi = min(d, mi);
            ma = max(d, ma);
        }
        return make_box(mi, ma);
    }
}
// NaN-aware variant: NaN compares false with everything, so it falls through
// all ordered comparisons and is counted; a first real value after a NaN
// prefix resets the box.
namespace for_if_continue_nan_check
{
    //v has only NaNs <=> min and max are both NaN
    //v has a real item <=> min and max are both real
    Box<double> calcBoxWithCheck(const vector<double>& v, size_t* pCounterNaN)
    {
        auto mi = v.front();
        auto ma = v.front();
        size_t nans = mi == mi ? 0 : 1;
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            //Minimize comparisons for "typical" data:
            //often d is already between min and max
            if (mi <= d && d <= ma) continue;
            //sometimes d is a new min or max
            if (d < mi) { mi = d; continue; }
            if (ma < d) { ma = d; continue; }
            //rarely d is NaN and all comparisons above are false
            if (!(d == d)) {++nans; continue; }
            //rarely d is not NaN but all items before it were
            mi = ma = d;
        }
        if (pCounterNaN) *pCounterNaN += nans;
        return make_box(mi, ma);
    }
    Box<double> calcBox(const vector<double>& v)
    {
        return calcBoxWithCheck(v, nullptr);
    }
}
// Nested conditional operator instead of if statements.
namespace for_conditional
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            d < mi ? mi = d : ma < d ? ma = d : d;
        }
        return make_box(mi, ma);
    }
}
// "fixend" variants hoist v.size() into a loop-local "end" so the compiler
// need not re-evaluate it per iteration.
namespace for_fixend_if_if
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0, end = v.size(); i != end; ++i) {
            const auto& d = v[i];
            if (d < mi) mi = d;
            if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
// Hoisted end with if / else-if.
namespace for_fixend_if_else
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0, end = v.size(); i != end; ++i) {
            const auto& d = v[i];
            if (d < mi) mi = d;
            else if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
// Hoisted end with early continue.
namespace for_fixend_if_continue
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0, end = v.size(); i != end; ++i) {
            const auto& d = v[i];
            if (mi <= d && d <= ma) continue;
            mi = min(d, mi);
            ma = max(d, ma);
        }
        return make_box(mi, ma);
    }
}
// Hoisted end with nested conditional operator.
namespace for_fixend_conditional
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (size_t i = 0, end = v.size(); i != end; ++i) {
            const auto& d = v[i];
            d < mi ? mi = d : ma < d ? ma = d : d;
        }
        return make_box(mi, ma);
    }
}
// Pragma experiment variants.
// NOTE(review): "#pragma omp parallel num_threads(0)" is non-conforming --
// the OpenMP spec requires a positive num_threads argument -- and the name
// suggests it was meant to discourage vectorization rather than run
// threads; confirm intent. Also note that "#pragma omp parallel" WITHOUT
// "for" makes every thread execute the whole loop while racing on the
// shared mi/ma (no reduction), so any parallel execution of these variants
// has undefined results. They are benchmark curiosities, not correct
// parallel reductions.
namespace for_if_if_pragma_no_vector
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        #pragma omp parallel num_threads(0)
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            if (d < mi) mi = d;
            if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
namespace for_if_else_pragma_no_vector
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        #pragma omp parallel num_threads(0)
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            if (d < mi) mi = d;
            else if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
namespace for_if_continue_pragma_no_vector
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        #pragma omp parallel num_threads(0)
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            if (mi <= d && d <= ma) continue;
            mi = min(d, mi);
            ma = max(d, ma);
        }
        return make_box(mi, ma);
    }
}
namespace for_conditional_pragma_no_vector
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        #pragma omp parallel num_threads(0)
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            d < mi ? mi = d : ma < d ? ma = d : d;
        }
        return make_box(mi, ma);
    }
}
// Same construct with num_threads(4): 4 threads each run the full loop and
// race on mi/ma (see note above).
namespace for_if_if_pragma_hint_parallel
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        #pragma omp parallel num_threads(4)
        for (size_t i = 0; i != v.size(); ++i) {
            const auto& d = v[i];
            if (d < mi) mi = d;
            if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
// Range-based for variants: same update strategies as above, but iterating
// with "for (const auto& d : v)" instead of an index.
namespace range_for_if_if
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (const auto& d : v) {
            if (d < mi) mi = d;
            if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
namespace range_for_min_max
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (const auto& d : v) {
            mi = min(mi, d);
            ma = max(ma, d);
        }
        return make_box(mi, ma);
    }
}
namespace range_for_if_else
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (const auto& d : v) {
            if (d < mi) mi = d;
            else if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
namespace range_for_if_continue
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (const auto& d : v) {
            if (mi <= d && d <= ma) continue;
            mi = min(d, mi);
            ma = max(d, ma);
        }
        return make_box(mi, ma);
    }
}
namespace range_for_conditional
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (const auto& d : v)
            d < mi ? mi = d : ma < d ? ma = d : d;
        return make_box(mi, ma);
    }
}
// Branchless style: both bounds updated via conditional expressions.
namespace range_for_conditional_conditional
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (const auto& d : v) {
            mi = d < mi ? d : mi;
            ma = ma < d ? d : ma;
        }
        return make_box(mi, ma);
    }
}
// As above with the comparisons reversed (<= keeps the old bound).
namespace range_for_conditional_conditional_reverse
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        for (const auto& d : v) {
            mi = mi <= d ? mi : d;
            ma = d <= ma ? ma : d;
        }
        return make_box(mi, ma);
    }
}
// Update logic packaged in a function object applied per element.
namespace range_for_operator
{
    struct B
    {
        double mi;
        double ma;
        void operator()(double d)
        {
            if (d < mi) mi = d;
            if (ma < d) ma = d;
        }
    };
    Box<double> calcBox(const vector<double>& v)
    {
        B b;
        b.mi = v.front();
        b.ma = v.front();
        for (const auto& d : v) b(d);
        return make_box(b.mi, b.ma);
    }
}
// While-loop variants. The originals advanced the index inside the loop
// condition ("while (i++ != end)") and then read v[i]: that skipped v[0]
// (harmless, as the bounds are seeded from v.front()) but on the final
// iteration read v[end] -- one past the end of the vector, undefined
// behavior. The index is now advanced after the element is read.
namespace while_if_if
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        size_t i = 0;
        size_t end = v.size();
        while (i != end) {
            const auto& d = v[i++];
            if (d < mi) mi = d;
            if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
namespace while_if_else
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        size_t i = 0;
        size_t end = v.size();
        while (i != end) {
            const auto& d = v[i++];
            if (d < mi) mi = d;
            else if (ma < d) ma = d;
        }
        return make_box(mi, ma);
    }
}
namespace while_if_continue
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto mi = v.front();
        auto ma = v.front();
        size_t i = 0;
        size_t end = v.size();
        while (i != end) {
            const auto& d = v[i++];
            if (mi <= d && d <= ma) continue;
            mi = min(d, mi);
            ma = max(d, ma);
        }
        return make_box(mi, ma);
    }
}
namespace while_conditional
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto min = v.front();
        auto max = v.front();
        size_t i = 0;
        size_t end = v.size();
        while (i != end) {
            const auto& d = v[i++];
            d < min ? min = d : max < d ? max = d : d;
        }
        return make_box(min, max);
    }
}
// Two standard-algorithm passes: min_element then max_element.
namespace std_min_element_max_element
{
    Box<double> calcBox(const vector<double>& v)
    {
        return make_box(*min_element(begin(v),end(v)), *max_element(begin(v), end(v)));
    }
}
// Single pass with std::minmax_element.
namespace std_minmax_element
{
    Box<double> calcBox(const vector<double>& v)
    {
        const auto p = minmax_element(begin(v), end(v));
        return make_box(*p.first, *p.second);
    }
}
// Fold with std::accumulate and a named binary function.
namespace std_accumulate_function
{
    inline Box<double>& insert(Box<double>& b, double d)
    {
        b.min = d < b.min ? d : b.min;
        b.max = b.max < d ? d : b.max;
        return b;
    }
    Box<double> calcBox(const vector<double>& v)
    {
        return accumulate(begin(v), end(v), make_box(v.front()), insert);
    }
}
// Fold with an inline lambda instead of a named function.
namespace std_accumulate_lambda
{
    Box<double> calcBox(const vector<double>& v)
    {
        return accumulate(begin(v), end(v), make_box(v.front()), [](Box<double>& b, double d)
        {
            b.min = d < b.min ? d : b.min;
            b.max = b.max < d ? d : b.max;
            return b;
        });
    }
}
// Lambda with an early return for values already inside the box
// (note: d == b.max falls through and harmlessly re-assigns b.max).
namespace std_accumulate_lambda_return
{
    Box<double> calcBox(const vector<double>& v)
    {
        return accumulate(begin(v), end(v), make_box(v.front()), [](Box<double>& b, double d)
        {
            if (b.min <= d && d < b.max)
                return b;
            d < b.min ? b.min = d : b.max = d;
            return b;
        });
    }
}
// Lambda that builds and returns a fresh Box copy each step.
namespace std_accumulate_lambda_copy
{
    Box<double> calcBox(const vector<double>& v)
    {
        return accumulate(begin(v), end(v), make_box(v.front()), [](const Box<double>& b, double d)
        {
            Box<double> c;
            c.min = d < b.min ? d : b.min;
            c.max = b.max < d ? d : b.max;
            return c;
        });
    }
}
// Parallel fold: split v into hardware_concurrency() chunks, accumulate each
// in an async task, then merge the per-chunk boxes.
// NOTE(review): thread::hardware_concurrency() may return 0, in which case
// no tasks are launched and the result collapses to a box around v.front()
// only -- presumably never hit in practice, but worth confirming.
namespace std_future_accumulate_insert
{
    inline Box<double>& insertValue(Box<double>& b, double d)
    {
        b.min = d < b.min ? d : b.min;
        b.max = b.max < d ? d : b.max;
        return b;
    }
    inline Box<double>& insertFutureBox(Box<double>& b, future<Box<double>>& futureBox)
    {
        auto c = futureBox.get();
        b.min = c.min < b.min ? c.min : b.min;
        b.max = b.max < c.max ? c.max : b.max;
        return b;
    }
    Box<double> calcBox(const vector<double>& v)
    {
        auto parts = thread::hardware_concurrency();
        auto size = v.size();
        auto first = begin(v);
        vector<future<Box<double>>> futures;
        for (size_t i = 0; i != parts; ++i) {
            // (size*i+size)/parts - (size*i)/parts distributes the remainder
            // evenly across chunks
            const auto part_size = (size * i + size) / parts - (size * i) / parts;
            futures.emplace_back(async(launch::async, [first, part_size]
            {
                return accumulate(first, next(first, part_size), make_box(*first), insertValue);
            }));
            advance(first, part_size);
        }
        return accumulate(begin(futures), end(futures), make_box(v.front()), insertFutureBox);
    }
}
// Same structure as std_future_accumulate_insert, with lambdas in place of
// the named helper functions. Shares the hardware_concurrency()==0 caveat.
namespace std_future_accumulate_lambda
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto parts = thread::hardware_concurrency();
        auto size = v.size();
        auto first = begin(v);
        vector<future<Box<double>>> futures;
        for (size_t i = 0; i != parts; ++i) {
            const auto part_size = (size * i + size) / parts - (size * i) / parts;
            futures.emplace_back(async(launch::async, [first, part_size]
            {
                return accumulate(first, next(first, part_size), make_box(*first), [](Box<double>& b, double d)
                {
                    b.min = d < b.min ? d : b.min;
                    b.max = b.max < d ? d : b.max;
                    return b;
                });
            }));
            advance(first, part_size);
        }
        return accumulate(begin(futures), end(futures), make_box(v.front()), [](Box<double>& b, future<Box<double>>& futureBox)
        {
            auto c = futureBox.get();
            b.min = c.min < b.min ? c.min : b.min;
            b.max = b.max < c.max ? c.max : b.max;
            return b;
        });
    }
}
// Chunked parallel variant with a hand-written while loop per task.
// Fixes from the original: (1) the worker advanced its index inside the
// loop condition ("while (i++ != end)") and then read v[i], which read
// v[end] -- for the last chunk that is v[v.size()], out of bounds; the
// index now advances after the read. (2) hardware_concurrency() may
// report 0; it is clamped to 1 so the whole vector is still processed.
namespace std_future_while
{
    Box<double> calcBox(const vector<double>& v)
    {
        auto parts = thread::hardware_concurrency();
        if (parts == 0) parts = 1;  // hardware_concurrency() may be unknown
        auto size = v.size();
        size_t first = 0;
        vector<future<Box<double>>> futures;
        for (size_t i = 0; i != parts; ++i)
        {
            // distributes the remainder of size/parts evenly across chunks
            const auto part_size = (size * i + size) / parts - (size * i) / parts;
            futures.emplace_back(async(launch::async, [&v, first, part_size]
            {
                auto mi = v[first];
                auto ma = mi;
                size_t i = first;
                size_t end = first + part_size;
                while (i != end) {
                    const double& d = v[i++];
                    mi = d < mi ? d : mi;
                    ma = ma < d ? d : ma;
                }
                return make_box(mi, ma);
            }));
            first += part_size;
        }
        auto mi = v.front();
        auto ma = v.front();
        for (auto& fut : futures) {
            auto c = fut.get();
            mi = c.min < mi ? c.min : mi;
            ma = ma < c.max ? c.max : ma;
        }
        return make_box(mi, ma);
    }
}
// Generic chunked-parallel accumulate: partFunc folds values within a chunk,
// func merges per-chunk results.
// NOTE(review): as with the std_future_* variants, hardware_concurrency()
// may return 0, collapsing the result to the init box -- confirm this is
// acceptable for the benchmark.
namespace accumulate_parallel
{
    template<class Container, class Result, class PartFunc, class Func>
    Result accumulate_parallel(const Container& v, Result init, PartFunc partFunc, Func func)
    {
        auto parts = thread::hardware_concurrency();
        auto size = v.size();
        auto first = begin(v);
        vector<future<Result>> futures;
        for (size_t i = 0; i != parts; ++i) {
            const auto part_size = (size * i + size) / parts - (size * i) / parts;
            futures.emplace_back(async(launch::async, [init, partFunc, first, part_size]
            {
                return accumulate(first, next(first, part_size), init, partFunc);
            }));
            advance(first, part_size);
        }
        return accumulate(begin(futures), end(futures), init, [func](Result& prevResult, future<Result>& fut)
        {
            return func(prevResult, fut.get());
        });
    }
    inline Box<double>& insertValue(Box<double>& b, double d)
    {
        b.min = d < b.min ? d : b.min;
        b.max = b.max < d ? d : b.max;
        return b;
    }
    inline Box<double>& insertBox(Box<double>& b, const Box<double>& c)
    {
        b.min = c.min < b.min ? c.min : b.min;
        b.max = b.max < c.max ? c.max : b.max;
        return b;
    }
    Box<double> calcBox(const vector<double>& v)
    {
        return accumulate_parallel(v, make_box(v.front()), insertValue, insertBox);
    }
}
// Same helper, but the fold and merge steps are passed as lambdas.
namespace accumulate_parallel_lambda
{
    template<class Container, class Result, class PartFunc, class Func>
    Result accumulate_parallel(const Container& v, Result init, PartFunc partFunc, Func func)
    {
        auto parts = thread::hardware_concurrency();
        auto size = v.size();
        auto first = begin(v);
        vector<future<Result>> futures;
        for (size_t i = 0; i != parts; ++i) {
            const auto part_size = (size * i + size) / parts - (size * i) / parts;
            futures.emplace_back(async(launch::async, [init, partFunc, first, part_size]
            {
                return accumulate(first, next(first, part_size), init, partFunc);
            }));
            advance(first, part_size);
        }
        return accumulate(begin(futures), end(futures), init, [func](Result& prevResult, future<Result>& fut)
        {
            return func(prevResult, fut.get());
        });
    }
    Box<double> calcBox(const vector<double>& v)
    {
        auto insertValue = [](Box<double>& b, double d) {
            b.min = d < b.min ? d : b.min;
            b.max = b.max < d ? d : b.max;
            return b;
        };
        auto insertBox = [](Box<double>& b, const Box<double>& c) {
            b.min = c.min < b.min ? c.min : b.min;
            b.max = b.max < c.max ? c.max : b.max;
            return b;
        };
        return accumulate_parallel(v, make_box(v.front()), insertValue, insertBox);
    }
}
// Leaves one core free for the merging thread.
// NOTE(review): hardware_concurrency() - 1 is 0 on a single-core machine
// (and wraps to a huge unsigned value if hardware_concurrency() were 0),
// so the chunking degenerates there -- confirm target hardware.
namespace accumulate_parallel_lambda_one_less_thread
{
    template<class Container, class Result, class PartFunc, class Func>
    Result accumulate_parallel(const Container& v, Result init, PartFunc partFunc, Func func)
    {
        auto parts = thread::hardware_concurrency() - 1;
        auto size = v.size();
        auto first = begin(v);
        vector<future<Result>> futures;
        for (size_t i = 0; i != parts; ++i) {
            const auto part_size = (size * i + size) / parts - (size * i) / parts;
            futures.emplace_back(async(launch::async, [init, partFunc, first, part_size]
            {
                return accumulate(first, next(first, part_size), init, partFunc);
            }));
            advance(first, part_size);
        }
        return accumulate(begin(futures), end(futures), init, [func](Result& prevResult, future<Result>& fut)
        {
            return func(prevResult, fut.get());
        });
    }
    Box<double> calcBox(const vector<double>& v)
    {
        auto insertValue = [](Box<double>& b, double d) {
            b.min = d < b.min ? d : b.min;
            b.max = b.max < d ? d : b.max;
            return b;
        };
        auto insertBox = [](Box<double>& b, const Box<double>& c) {
            b.min = c.min < b.min ? c.min : b.min;
            b.max = b.max < c.max ? c.max : b.max;
            return b;
        };
        return accumulate_parallel(v, make_box(v.front()), insertValue, insertBox);
    }
}
// Pairwise variant: order each adjacent pair first (one comparison), then
// compare only the smaller against min and the larger against max.
// NOTE(review): reads v[i+1] with i stepping by 2, so an ODD-sized vector
// reads one element past the end (undefined behavior). All "next_two"
// variants in this file share the even-size precondition.
namespace for_next_two_if_else
{
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        for (size_t i = 0; i < v.size(); i +=2) {
            if (v[i] < v[i+1])
            {
                if (v[i] < min) min = v[i];
                if (max < v[i+1]) max = v[i+1];
            }
            else
            {
                if (v[i+1] < min) min = v[i+1];
                if (max < v[i]) max = v[i];
            }
        }
        return make_box(min, max);
    }
}
// Pairwise variant: order each adjacent pair into (mi, ma) before merging
// into the running min/max. The original used
//     mi = a < b ? ma = b, a : ma = a, b;
// which parses as "(mi = (a < b ? (ma = b, a) : (ma = a))), b;" -- for
// a >= b it assigned the LARGER value to mi and discarded b entirely,
// producing a wrong minimum. Replaced with an explicit if/else.
// Requires an even number of elements, like the other "next_two" variants.
namespace for_next_two_assign_before_compare
{
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        double a;
        double b;
        double mi;
        double ma;
        for (size_t i = 0; i < v.size();) {
            a = v[i++];
            b = v[i++];
            if (a < b) { mi = a; ma = b; }
            else       { mi = b; ma = a; }
            if (mi < min) min = mi;
            if (max < ma) max = ma;
        }
        return make_box(min, max);
    }
}
// Pairs elements from the two halves of the vector instead of adjacent ones.
// NOTE(review): for an ODD-sized vector the last element v[size-1] is never
// examined, so the result can be wrong there. (The author-flagged "undefined
// behavior" comment is overly cautious here: a and b are distinct objects
// and i/j are modified in separate subexpressions.)
namespace for_next_two_assign_before_compare2
{
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        double a;
        double b;
        size_t i = 0;
        size_t j = v.size()/2;
        const size_t e = v.size()/2;
        while (i < e) {
            if ((a = v[i++]) < (b = v[j++])) //undefined behavior!!?!
            {
                if (a < min) min = a;
                if (max < b) max = b;
            }
            else
            {
                if (b < min) min = b;
                if (max < a) max = a;
            }
        }
        return make_box(min, max);
    }
}
// Adjacent-pair variant with both reads in the condition. The author's
// "undefined behavior" flag is correct here: the two i++ side effects are
// unsequenced relative to each other within one expression.
namespace for_next_two_assign_in_comparision
{
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        double a;
        double b;
        for (size_t i = 0; i < v.size();) {
            if ((a = v[i++]) < (b = v[i++]))//undefined behavior!!?!
            {
                if (a < min) min = a;
                if (max < b) max = b;
            }
            else
            {
                if (b < min) min = b;
                if (max < a) max = a;
            }
        }
        return make_box(min, max);
    }
}
// Pairwise variant using std::minmax + std::tie.
// NOTE(review): v[i+1] is out of bounds for an odd-sized vector.
namespace for_next_two_assign_minmax
{
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        double mi;
        double ma;
        for (size_t i = 0; i < v.size(); i += 2) {
            tie(mi, ma) = minmax(v[i], v[i+1]);
            min = std::min(mi, min);
            max = std::max(ma, max);
        }
        return make_box(min, max);
    }
}
// Pairwise while-loop variant with assignments as separate statements.
// NOTE(review): for an ODD-sized vector the second read of the last
// iteration is v[size], one past the end (undefined behavior).
namespace while_next_two_assign_before_comparision
{
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        double a = v[0];
        double b = v[1];
        size_t i = 0;
        size_t end = v.size();
        while (i < end) {
            a = v[i++];
            b = v[i++];
            if (a < b)
            {
                if (a < min) min = a;
                if (max < b) max = b;
            }
            else
            {
                if (b < min) min = b;
                if (max < a) max = a;
            }
        }
        return make_box(min, max);
    }
}
// Pairwise variant with two separate indices advanced after the comparison
// (well-defined: a and b are distinct objects, i and j advance afterwards).
// NOTE(review): for an ODD-sized vector the last element is never examined.
namespace while_next_two_assign_in_comparision
{
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        double a;
        double b;
        size_t i = 0;
        size_t j = 1;
        size_t end = v.size();
        while (j < end) {
            if ((a = v[i]) < (b = v[j]))
            {
                if (a < min) min = a;
                if (max < b) max = b;
            }
            else
            {
                if (b < min) min = b;
                if (max < a) max = a;
            }
            ++i; ++i;
            ++j; ++j;
        }
        return make_box(min, max);
    }
}
// Pairwise variant with the first read hoisted out of the condition, so the
// two increments ARE sequenced (no UB from ordering).
// NOTE(review): still reads v[size] out of bounds for an odd-sized vector.
namespace for_next_two_assign_in_comparision2
{
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        double a = v[0];
        double b = v[1];
        for (size_t i = 0; i < v.size();) {
            a = v[i++];
            if (a < (b=v[i++]))
            {
                if (a < min) min = a;
                if (max < b) max = b;
            }
            else
            {
                if (b < min) min = b;
                if (max < a) max = a;
            }
        }
        return make_box(min, max);
    }
}
namespace for_next_two_conditional
{
    // Pairwise min/max scan using conditional expressions for the updates.
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        double a;
        double b;
        for (size_t i = 0; i < v.size(); ) {
            // Fixed: `(a = v[i++]) < (b = v[i++])` was undefined behavior
            // (two unsequenced modifications of i). Sequence the reads.
            a = v[i++];
            b = v[i++];
            if (a < b)
            {
                min = a < min ? a : min;
                max = max < b ? b : max;
            }
            else
            {
                min = b < min ? b : min;
                max = max < a ? a : max;
            }
        }
        return make_box(min, max);
    }
}
namespace for_next_two_conditional2
{
    // Pairwise min/max scan squeezed into conditional/comma expressions.
    Box<double> calcBox(const vector<double>& v)
    {
        double min = v.front();
        double max = v.front();
        double a;
        double b;
        bool c;
        for (size_t i = 0; i < v.size(); ) {
            // Fixed: the original evaluated both `v[i++]` reads unsequenced
            // inside one comparison — undefined behavior.
            a = v[i++];
            b = v[i++];
            c = a < b;
            // Fixed: the original `c ? X, Y : Z, W` parsed as `(c ? (X,Y) : Z), W`
            // because the comma operator binds looser than ?: — so W ran on
            // every iteration regardless of c. Group each branch explicitly.
            c ? (void)((a < min ? min = a : 0.0), (max < b ? max = b : 0.0))
              : (void)((b < min ? min = b : 0.0), (max < a ? max = a : 0.0));
        }
        return make_box(min, max);
    }
}
namespace simd2_range_for
{
    // SIMD (double2) bounding box: per-lane min/max folded over every element.
    Box<double> calcBox(const vector<simd::double2>& v)
    {
        using namespace simd;
        auto lo = v.front();
        auto hi = v.front();
        for (size_t k = 0; k < v.size(); ++k) {
            lo = min(v[k], lo);
            hi = max(v[k], hi);
        }
        return make_double_box(lo, hi);
    }
}
namespace simd2_while
{
    // SIMD (double2) bounding box; same fold as simd2_range_for but the
    // original expressed it with explicit iterators.
    Box<double> calcBox(const vector<simd::double2>& v)
    {
        using namespace simd;
        auto lo = v.front();
        auto hi = v.front();
        for (const auto& elem : v) {
            lo = min(elem, lo);
            hi = max(elem, hi);
        }
        return make_double_box(lo, hi);
    }
}
namespace simd2_future_while
{
    // SIMD (double2) bounding box computed on hardware_concurrency() async
    // tasks, each folding one contiguous slice, merged on the calling thread.
    Box<double> calcBox(const vector<simd::double2>& v)
    {
        using namespace simd;
        // hardware_concurrency() is allowed to return 0; fall back to 1 so
        // the slicing loop below still covers the whole vector.
        auto parts = thread::hardware_concurrency();
        if (parts == 0) parts = 1;
        auto size = v.size();
        size_t first = 0;
        vector<future<Box<double>>> futures;
        for (size_t i = 0; i != parts; ++i)
        {
            // Slice i covers [floor(i*size/parts), floor((i+1)*size/parts)).
            const auto part_size = (size * i + size) / parts - (size * i) / parts;
            // Fixed: an empty slice (size < parts) made the task read
            // v[first] out of bounds once the vector was exhausted.
            if (part_size == 0) continue;
            futures.emplace_back(async(launch::async, [&v, first, part_size]
            {
                auto mi = v[first];
                auto ma = mi;
                size_t i = first;
                size_t end = first + part_size;
                while (i != end) {
                    const auto& d = v[i];
                    mi = min(d, mi);
                    ma = max(d, ma);
                    ++i;
                }
                return make_double_box(mi, ma);
            }));
            first += part_size;
        }
        // Merge the per-slice boxes; fut.get() blocks until each task is done.
        auto b = make_double_box(v.front(), v.front());
        auto& mi = b.min;
        auto& ma = b.max;
        for (auto& fut : futures) {
            auto partial = fut.get();
            mi = min(partial.min, mi);
            ma = max(partial.max, ma);
        }
        return make_box(mi, ma);
    }
}
namespace simd2_accumulate_parallel
{
    // Generic parallel fold: splits v into hardware_concurrency() contiguous
    // chunks, folds each chunk with partFunc on its own async task, then
    // combines the per-chunk results sequentially with func.
    // NOTE(review): if thread::hardware_concurrency() returns 0 the chunk
    // loop never runs and `init` is returned unchanged — confirm this is
    // acceptable for the benchmark harness.
    template<class Container, class Result, class PartFunc, class Func>
    Result accumulate_parallel(const Container& v, Result init, PartFunc partFunc, Func func)
    {
        auto parts = thread::hardware_concurrency();
        auto size = v.size();
        auto first = begin(v);
        vector<future<Result>> futures;
        for (size_t i = 0; i != parts; ++i) {
            // Chunk i gets floor((i+1)*size/parts) - floor(i*size/parts)
            // elements, distributing any remainder as evenly as possible.
            const auto part_size = (size * i + size) / parts - (size * i) / parts;
            futures.emplace_back(async(launch::async, [init, partFunc, first, part_size]
            {
                return accumulate(first, next(first, part_size), init, partFunc);
            }));
            advance(first, part_size);
        }
        // Merge chunk results in order; fut.get() blocks per finished task.
        return accumulate(begin(futures), end(futures), init, [func](Result& prevResult, future<Result>& fut)
        {
            return func(prevResult, fut.get());
        });
    }
    // Bounding box over simd double2 values via the generic parallel fold.
    Box<double> calcBox(const vector<simd::double2>& v)
    {
        using namespace simd;
        Box2d box2d(v.front());
        box2d = accumulate_parallel(v, box2d, insertValue, insertBox);
        return make_box(box2d.min(), box2d.max());
    }
}
namespace simd4_range_for
{
    // SIMD (double4) bounding box: per-lane min/max folded over all elements.
    Box<double> calcBox(const vector<simd::double4, AlignedAllocator<simd::double4, 32>>& v)
    {
        using namespace simd;
        auto lo = v.front();
        auto hi = v.front();
        for (auto it = v.cbegin(); it != v.cend(); ++it) {
            lo = min(*it, lo);
            hi = max(*it, hi);
        }
        return make_double_box(lo, hi);
    }
}
namespace simd4_range_for_range_for
{
    // Same result via two separate passes: one computes the minimum,
    // the other the maximum.
    Box<double> calcBox(const vector<simd::double4, AlignedAllocator<simd::double4, 32>>& v)
    {
        using namespace simd;
        auto lo = v.front();
        for (size_t k = 0; k < v.size(); ++k)
            lo = min(v[k], lo);
        auto hi = v.front();
        for (size_t k = 0; k < v.size(); ++k)
            hi = max(v[k], hi);
        return make_double_box(lo, hi);
    }
}
namespace simd4_while
{
    // SIMD (double4) bounding box; the original iterated with explicit
    // iterators, this version uses a range-for over the same sequence.
    Box<double> calcBox(const vector<simd::double4, AlignedAllocator<simd::double4, 32>>& v)
    {
        using namespace simd;
        auto lo = v.front();
        auto hi = v.front();
        for (const auto& elem : v) {
            lo = min(elem, lo);
            hi = max(elem, hi);
        }
        return make_double_box(lo, hi);
    }
}
namespace simd4_while_while
{
    // Two independent single-purpose passes: min first, then max.
    Box<double> calcBox(const vector<simd::double4, AlignedAllocator<simd::double4, 32>>& v)
    {
        using namespace simd;
        const size_t n = v.size();
        auto lo = v.front();
        for (size_t k = 0; k != n; ++k)
            lo = min(v[k], lo);
        auto hi = v.front();
        for (size_t k = 0; k != n; ++k)
            hi = max(v[k], hi);
        return make_double_box(lo, hi);
    }
}
namespace simd4_future_while
{
    // SIMD (double4) bounding box computed on hardware_concurrency() async
    // tasks, each folding one contiguous slice, merged on the calling thread.
    Box<double> calcBox(const vector<simd::double4, AlignedAllocator<simd::double4, 32>>& v)
    {
        using namespace simd;
        // hardware_concurrency() is allowed to return 0; fall back to 1 so
        // the slicing loop below still covers the whole vector.
        auto parts = thread::hardware_concurrency();
        if (parts == 0) parts = 1;
        auto size = v.size();
        size_t first = 0;
        vector<future<Box<double>>> futures;
        for (size_t i = 0; i != parts; ++i)
        {
            // Slice i covers [floor(i*size/parts), floor((i+1)*size/parts)).
            const auto part_size = (size * i + size) / parts - (size * i) / parts;
            // Fixed: an empty slice (size < parts) made the task read
            // v[first] out of bounds once the vector was exhausted.
            if (part_size == 0) continue;
            futures.emplace_back(async(launch::async, [&v, first, part_size]
            {
                auto mi = v[first];
                auto ma = mi;
                size_t i = first;
                size_t end = first + part_size;
                while (i != end) {
                    const double4& d = v[i];
                    mi = min(d, mi);
                    ma = max(d, ma);
                    ++i;
                }
                return make_double_box(mi, ma);
            }));
            first += part_size;
        }
        // Merge the per-slice boxes; fut.get() blocks until each task is done.
        auto b = make_double_box(v.front(), v.front());
        auto& mi = b.min;
        auto& ma = b.max;
        for (auto& fut : futures) {
            auto partial = fut.get();
            mi = min(partial.min, mi);
            ma = max(partial.max, ma);
        }
        return make_box(mi, ma);
    }
}
|
denserasternonsimd.h | #pragma once
#include "gdx/cell.h"
#include "gdx/cpupredicates-private.h"
#include "gdx/eigeniterationsupport-private.h"
#include "gdx/exception.h"
#include "gdx/nodatapredicates-private.h"
#include "gdx/rasterchecks.h"
#include "gdx/rasteriterator.h"
#include "gdx/rastermetadata.h"
#include "infra/cast.h"
#include "infra/span.h"
#include "infra/string.h"
#include <Eigen/Core>
#include <algorithm>
#include <cassert>
#include <vector>
namespace gdx::nosimd {
template <typename T>
class DenseRaster
{
public:
using value_type = T;
using size_type = std::size_t;
using data_type = Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
using nodata_type = std::optional<value_type>;
using pointer = T*;
using const_pointer = const T*;
using iterator = pointer;
using const_iterator = const_pointer;
static constexpr bool raster_type_has_nan = std::numeric_limits<T>::has_quiet_NaN;
static constexpr bool with_nodata = true;
static constexpr T NaN = std::numeric_limits<T>::quiet_NaN();
static constexpr bool typeHasNaN()
{
return raster_type_has_nan;
}
DenseRaster() = default;
DenseRaster(int32_t rows, int32_t cols)
: _meta(rows, cols)
, _data(rows, cols)
{
}
explicit DenseRaster(RasterMetadata meta)
: _meta(std::move(meta))
, _data(_meta.rows, _meta.cols)
{
init_nodata_values();
}
DenseRaster(int32_t rows, int32_t cols, T fillValue)
: DenseRaster(RasterMetadata(rows, cols), fillValue)
{
}
DenseRaster(const RasterMetadata& meta, T fillValue)
: _meta(meta)
, _data(meta.rows, meta.cols)
{
if constexpr (raster_type_has_nan) {
// make sure we fill tha raster with NaNs if the fill value is the nodata value
if (_meta.nodata.has_value() && fillValue == static_cast<T>(*_meta.nodata)) {
fillValue = NaN;
}
}
fill(fillValue);
}
DenseRaster(int32_t rows, int32_t cols, std::span<const T> data)
: DenseRaster(RasterMetadata(rows, cols), data)
{
}
DenseRaster(const RasterMetadata& meta, std::span<const T> data)
: _meta(meta)
, _data(meta.rows, meta.cols)
{
throw_on_datasize_mismatch(meta.rows, meta.cols, data.size());
std::copy(data.begin(), data.end(), _data.data());
init_nodata_values();
}
DenseRaster(const RasterMetadata& meta, data_type&& data)
: _meta(meta)
, _data(data)
{
if (inf::truncate<int32_t>(_data.size()) != meta.rows * meta.cols) {
throw InvalidArgument("Invalid data size provided");
}
init_nodata_values();
}
DenseRaster(DenseRaster<T>&&) noexcept = default;
DenseRaster(const DenseRaster<T>& other) = delete;
DenseRaster& operator=(DenseRaster<T>&&) = default;
DenseRaster& operator=(const DenseRaster<T>& other) = delete;
void resize_and_fill(int32_t rows, int32_t cols, value_type value)
{
resize(rows, cols);
fill(value);
}
void resize(int32_t rows, int32_t cols)
{
_meta.rows = rows;
_meta.cols = cols;
_data.resize(rows, cols);
}
void resize(int32_t rows, int32_t cols, std::optional<double> nodata)
{
_meta.rows = rows;
_meta.cols = cols;
_meta.nodata = nodata;
_data.resize(rows, cols);
}
void set_metadata(RasterMetadata meta)
{
if (meta.rows * meta.cols != ssize()) {
throw InvalidArgument("Cannot change metadata: invalid size");
}
_meta = std::move(meta);
}
DenseRaster<T> copy() const
{
DenseRaster<T> dst(_meta);
dst._data = _data;
return dst;
}
auto begin()
{
return Eigen::begin(_data);
}
auto begin() const
{
return cbegin();
}
auto cbegin() const
{
return Eigen::cbegin(_data);
}
auto end()
{
return Eigen::end(_data);
}
auto end() const
{
return cend();
}
auto cend() const
{
return Eigen::cend(_data);
}
const value_type* data() const noexcept
{
return _data.data();
}
value_type* data() noexcept
{
return _data.data();
}
bool has_nodata() const noexcept
{
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
return std::any_of(begin(), end(), [](T value) { return std::isnan(value); });
} else {
return std::any_of(begin(), end(), [nod = static_cast<T>(*_meta.nodata)](T value) { return value == nod; });
}
}
return false;
}
std::optional<T> nodata() const noexcept
{
return inf::optional_cast<T>(_meta.nodata);
}
std::size_t size() const noexcept
{
return _data.size();
}
std::ptrdiff_t ssize() const noexcept
{
assert(_data.size() <= std::numeric_limits<std::ptrdiff_t>::max());
return static_cast<std::ptrdiff_t>(_data.size());
}
bool empty() const noexcept
{
return _data.size() == 0;
}
void collapse_data()
{
// no collapse needed for non floating point types
if constexpr (raster_type_has_nan) {
if (_meta.nodata.has_value() && !std::isnan(*_meta.nodata)) {
std::transform(begin(), end(), begin(), [nod = inf::truncate<T>(*_meta.nodata)](T value) {
return std::isnan(value) ? nod : value;
});
}
}
}
const RasterMetadata& metadata() const noexcept
{
return _meta;
}
void set_projection(int32_t epsg)
{
_meta.set_projection_from_epsg(epsg);
}
void clear_projection()
{
_meta.projection.clear();
}
void set_nodata(double newValue)
{
if constexpr (!raster_type_has_nan) {
if (std::isnan(newValue)) {
throw InvalidArgument("Nodata value cannot be NaN for integral rasters");
}
}
_meta.nodata = newValue;
}
void replace_nodata(T newValue)
{
const auto dataSize = _data.size();
for (int i = 0; i < dataSize; ++i) {
if (is_nodata(i)) {
_data(i) = newValue;
}
}
_meta.nodata.reset();
}
void turn_value_into_nodata(T value)
{
const auto dataSize = _data.size();
for (int i = 0; i < dataSize; ++i) {
if (_data(i) == value) {
mark_as_nodata(i);
}
}
}
// assigns the value to all the elements of the raster, even nodata
void fill(value_type value)
{
std::fill(begin(), end(), value);
}
// assigns the value to all the elements of the raster, leaving nodata values intact
void fill_values(value_type value)
{
std::fill(value_begin(*this), value_end(*this), value);
}
// Makes all elements of the raster nodata values
void fill_with_nodata()
{
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
fill(NaN);
} else {
fill(static_cast<T>(*_meta.nodata));
}
}
}
int32_t rows() const noexcept
{
return _meta.rows;
}
int32_t cols() const noexcept
{
return _meta.cols;
}
void mark_as_data(std::size_t /*index*/) noexcept
{
}
void mark_as_data(Cell /*cell*/) noexcept
{
}
void mark_as_data(int32_t /*row*/, int32_t /*col*/) noexcept
{
}
void mark_as_nodata(std::size_t index)
{
assert(_meta.nodata.has_value());
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
_data(index) = NaN;
} else {
_data(index) = static_cast<T>(*_meta.nodata);
}
}
}
void mark_as_nodata(int32_t row, int32_t col)
{
assert(_meta.nodata.has_value());
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
_data(row, col) = NaN;
} else {
_data(row, col) = static_cast<T>(*_meta.nodata);
}
}
}
void mark_as_nodata(Cell cell)
{
mark_as_nodata(cell.r, cell.c);
}
std::optional<value_type> optional_value(std::size_t index) const noexcept
{
if (is_nodata(index)) {
return std::optional<value_type>();
} else {
return _data(index);
}
}
template <typename VarType>
std::optional<VarType> optional_value_as(std::size_t index) const noexcept
{
if (is_nodata(index)) {
return std::optional<VarType>();
} else {
return static_cast<VarType>(_data(index));
}
}
bool is_nodata_value(T value) const noexcept
{
if constexpr (raster_type_has_nan) {
return std::isnan(value);
} else {
if (_meta.nodata.has_value()) {
return value == *_meta.nodata;
} else {
return false;
}
}
}
bool is_nodata(std::size_t index) const noexcept
{
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
return std::isnan(_data(index));
} else {
return _data(index) == static_cast<T>(*_meta.nodata);
}
}
return false;
}
bool is_nodata(const Cell& cell) const noexcept
{
return is_nodata(cell.r, cell.c);
}
bool is_nodata(int32_t r, int32_t c) const noexcept
{
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
return std::isnan(_data(r, c));
} else {
return _data(r, c) == static_cast<T>(*_meta.nodata);
}
}
return false;
}
bool tolerant_equal_to(const DenseRaster<T>& other, value_type tolerance = std::numeric_limits<value_type>::epsilon()) const noexcept
{
if (_meta != other._meta) {
return false;
}
return tolerant_data_equal_to(other, tolerance);
}
bool tolerant_data_equal_to(const DenseRaster<T>& other, value_type relTolerance = value_type(1e-05)) const noexcept
{
throw_on_size_mismatch(*this, other);
cpu::float_equal_to<T> comp(relTolerance);
const auto dataSize = size();
for (std::size_t i = 0; i < dataSize; ++i) {
if (is_nodata(i) != other.is_nodata(i)) {
return false;
}
if (!is_nodata(i) && !comp(_data(i), other[i])) {
return false;
}
}
return true;
}
/* Add the value to the cell, if the cell is nodata it will become data with the provided value */
void add_to_cell(Cell c, T value)
{
if (is_nodata(c)) {
(*this)[c] = value;
} else {
(*this)[c] += value;
}
}
bool operator==(const DenseRaster<T>& other) const noexcept
{
throw_on_size_mismatch(*this, other);
const auto dataSize = size();
for (std::size_t i = 0; i < dataSize; ++i) {
if (is_nodata(i) != other.is_nodata(i)) {
return false;
}
if (!is_nodata(i) && (_data(i) != other[i])) {
return false;
}
}
return true;
}
bool operator!=(const DenseRaster<T>& other) const noexcept
{
return !(*this == other);
}
DenseRaster<uint8_t> not_equals(const DenseRaster<T>& other) const noexcept
{
throw_on_size_mismatch(*this, other);
return perform_binary_operation<nodata::not_equal_to>(other);
}
template <typename TValue>
DenseRaster<uint8_t> not_equals(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return perform_unary_operation<nodata::not_equal_to>(value);
}
template <typename TOther>
auto operator+(const DenseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
return perform_raster_operation<std::plus>(other);
}
template <typename TValue>
auto operator+(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return perform_scalar_operation<nodata::plus_scalar>(value);
}
DenseRaster<T>& operator+=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
nodata::plus_scalar<T> pred(nodata(), value);
for (auto& elem : _data) {
elem = pred(elem);
}
return *this;
}
template <typename TOther>
DenseRaster<T>& operator+=(const DenseRaster<TOther>& other)
{
throw_on_size_mismatch(*this, other);
const auto dataSize = size();
for (std::size_t i = 0; i < dataSize; ++i) {
bool leftis_nodata = is_nodata(i);
if (leftis_nodata != other.is_nodata(i)) {
if (leftis_nodata) {
mark_as_data(i);
_data(i) = static_cast<T>(other[i]);
}
continue;
}
if (!leftis_nodata) {
_data(i) += static_cast<T>(other[i]);
}
}
return *this;
}
DenseRaster<T> operator-() const
{
if constexpr (std::is_unsigned_v<T>) {
throw RuntimeError("Minus operator applied to unsigned value");
} else {
DenseRaster<T> result(_meta, DenseRaster<T>::data_type(_data));
std::transform(result.begin(), result.end(), result.begin(), nodata::negate<T>(_meta.nodata));
return result;
}
}
template <typename TOther>
auto operator-(const DenseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
return perform_raster_operation<std::minus>(other);
}
template <typename TValue>
auto operator-(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return perform_scalar_operation<nodata::minus_scalar>(value);
}
DenseRaster<T>& operator-=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
nodata::minus_scalar<T> pred(nodata(), value);
for (auto& elem : _data) {
elem = pred(elem);
}
return *this;
}
template <typename TOther>
auto operator*(const DenseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
return perform_raster_operation<std::multiplies>(other);
}
template <typename TValue>
auto operator*(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return perform_scalar_operation<nodata::multiplies_scalar>(value);
}
DenseRaster<T>& operator*=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
nodata::multiplies_scalar<T> pred(nodata(), value);
for (auto& elem : _data) {
elem = pred(elem);
}
return *this;
}
template <typename TOther>
DenseRaster<T>& operator*=(const DenseRaster<TOther>& other)
{
throw_on_size_mismatch(*this, other);
if constexpr (raster_type_has_nan) {
_data *= other._data;
} else {
const auto dataSize = size();
for (size_t i = 0; i < dataSize; ++i) {
if (is_nodata(i) || other.is_nodata(i)) {
mark_as_nodata(i);
} else {
_data(i) *= static_cast<T>(other[i]);
}
}
}
return *this;
}
template <typename TOther>
auto operator/(const DenseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
using TResult = decltype(0.f * TOther()); // use float or double as result type
DenseRaster<TResult> result(_meta);
if (!_meta.nodata.has_value() && other.metadata().nodata.has_value()) {
result.set_nodata(*other.metadata().nodata);
}
if (!result.nodata().has_value()) {
result.set_nodata(std::numeric_limits<TResult>::quiet_NaN());
}
TResult nodata = result.nodata().value();
if constexpr (std::numeric_limits<TResult>::has_quiet_NaN) {
nodata = std::numeric_limits<TResult>::quiet_NaN();
}
#pragma omp parallel for
for (size_t i = 0; i < size(); ++i) {
auto v = other[i];
if (v == 0) {
result[i] = nodata;
} else {
if (is_nodata(i) || other.is_nodata(i)) {
result[i] = nodata;
} else {
result[i] = static_cast<TResult>(_data(i)) / other[i];
}
}
}
return result;
}
template <typename TValue>
auto operator/(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
if (value == 0) {
throw InvalidArgument("Division by zero");
}
return perform_scalar_operation<nodata::divides_scalar>(value);
}
DenseRaster<T>& operator/=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
nodata::divides_scalar<T> pred(nodata(), value);
for (auto& elem : _data) {
elem = pred(elem);
}
return *this;
}
// In-place element-wise division by another raster.
// Fixed for consistency with operator*= and the binary operator/: the
// original skipped only *mismatched* nodata cells, so it still divided when
// BOTH cells were nodata (garbage for integer nodata values) and performed
// an unguarded division by zero when the divisor cell was 0 (UB for
// integral T). Now a cell becomes nodata when either operand is nodata or
// the divisor is 0.
template <typename TOther>
DenseRaster<T>& operator/=(const DenseRaster<TOther>& other)
{
    throw_on_size_mismatch(*this, other);
    const auto dataSize = size();
    for (size_t i = 0; i < dataSize; ++i) {
        if (is_nodata(i) || other.is_nodata(i) || other[i] == 0) {
            mark_as_nodata(i);
        } else {
            _data(i) /= static_cast<T>(other[i]);
        }
    }
    return *this;
}
value_type& operator[](std::size_t index)
{
return _data(index);
}
value_type operator[](std::size_t index) const
{
return _data(index);
}
value_type& operator[](const Cell& cell)
{
return _data(cell.r, cell.c);
}
const value_type& operator[](const Cell& cell) const
{
return _data(cell.r, cell.c);
}
value_type& operator()(int32_t row, int32_t col)
{
return _data(row, col);
}
const value_type& operator()(int32_t row, int32_t col) const
{
return _data(row, col);
}
DenseRaster<uint8_t> operator!() const
{
return perform_unary_operation<nodata::logical_not>();
}
template <typename TOther>
DenseRaster<uint8_t> operator&&(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::logical_and>(other);
}
template <typename TOther>
DenseRaster<uint8_t> operator||(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::logical_or>(other);
}
template <typename TOther>
DenseRaster<uint8_t> operator>(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::greater>(other);
}
DenseRaster<uint8_t> operator>(T threshold) const
{
return perform_unary_operation<nodata::greater>(threshold);
}
template <typename TOther>
DenseRaster<uint8_t> operator>=(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::greater_equal>(other);
}
DenseRaster<uint8_t> operator>=(T threshold) const
{
return perform_unary_operation<nodata::greater_equal>(threshold);
}
template <typename TOther>
DenseRaster<uint8_t> operator<(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::less>(other);
}
DenseRaster<uint8_t> operator<(T threshold) const
{
return perform_unary_operation<nodata::less>(threshold);
}
template <typename TOther>
DenseRaster<uint8_t> operator<=(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::less_equal>(other);
}
DenseRaster<uint8_t> operator<=(T threshold) const
{
return perform_unary_operation<nodata::less_equal>(threshold);
}
void replace(T oldValue, T newValue) noexcept
{
std::replace(begin(), end(), oldValue, newValue);
}
std::string to_string() const
{
if constexpr (std::is_same_v<uint8_t, T>) {
DenseRaster<uint16_t> copy(_meta);
std::copy(begin(), end(), copy.begin());
return copy.to_string();
} else {
std::stringstream ss;
for (int i = 0; i < rows(); ++i) {
std::span<const T> row(&_data[i * cols()], cols());
ss << inf::str::join(row, ", ") << "\n";
}
return ss.str();
}
}
void init_nodata_values()
{
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
std::replace(begin(), end(), static_cast<value_type>(*_meta.nodata), std::numeric_limits<value_type>::quiet_NaN());
}
}
}
private:
static void throw_on_datasize_mismatch(int32_t rows, int32_t cols, size_t dataSize)
{
if (static_cast<size_t>(rows * cols) != dataSize) {
throw InvalidArgument("Raster data size does not match provided dimensions {} vs {}x{}", dataSize, rows, cols);
}
}
// Performs a unary operation on all the elements that results in true or false
template <template <typename> typename BinaryPredicate, typename TOther>
DenseRaster<uint8_t> perform_unary_operation(TOther value) const
{
DenseRaster<uint8_t> result(_meta);
if (_meta.nodata.has_value()) {
result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
}
auto pred = BinaryPredicate<T>(_meta.nodata, std::optional<double>());
const auto size = result.size();
#pragma omp parallel for
for (std::size_t i = 0; i < size; ++i) {
result[i] = pred(_data(i), static_cast<T>(value));
}
return result;
}
template <template <typename> typename UnaryPredicate>
DenseRaster<uint8_t> perform_unary_operation() const
{
DenseRaster<uint8_t> result(_meta);
if (_meta.nodata) {
result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
}
std::transform(cbegin(), cend(), result.begin(), UnaryPredicate<T>(_meta.nodata));
return result;
}
template <template <typename> typename BinaryPredicate, typename TOther>
DenseRaster<uint8_t> perform_binary_operation(const DenseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
using WidestType = decltype(T() * TOther());
DenseRaster<uint8_t> result(_meta);
if (_meta.nodata.has_value() || other.metadata().nodata.has_value()) {
result.set_nodata(std::numeric_limits<uint8_t>::max());
}
auto pred = BinaryPredicate<WidestType>(_meta.nodata, other.metadata().nodata);
const auto size = result.size();
#pragma omp parallel for
for (std::size_t i = 0; i < size; ++i) {
result[i] = pred(static_cast<WidestType>(_data(i)), static_cast<WidestType>(other[i]));
}
return result;
}
template <template <typename> typename UnaryPredicate, typename TScalar>
auto perform_scalar_operation(TScalar scalar) const
{
using WidestType = decltype(T() * TScalar());
auto pred = UnaryPredicate<WidestType>(_meta.nodata, static_cast<WidestType>(scalar));
DenseRaster<WidestType> result(_meta);
std::transform(cbegin(), cend(), result.begin(), [this, pred](T value) {
if (is_nodata_value(value)) {
return value;
}
return pred(value);
});
return result;
}
template <template <typename> typename BinaryPredicate, typename TOther>
auto perform_raster_operation(const DenseRaster<TOther>& other) const
{
using WidestType = decltype(T() * TOther());
DenseRaster<WidestType> result(_meta);
if (!_meta.nodata.has_value() && other.metadata().nodata.has_value()) {
result.set_nodata(*other.metadata().nodata);
}
auto operation = BinaryPredicate<WidestType>();
auto nodata = result.nodata().value_or(0);
if constexpr (std::numeric_limits<WidestType>::has_quiet_NaN) {
nodata = std::numeric_limits<WidestType>::quiet_NaN();
}
#pragma omp parallel for
for (std::size_t i = 0; i < size(); ++i) {
if (is_nodata(i) || other.is_nodata(i)) {
result[i] = nodata;
} else {
result[i] = operation(static_cast<WidestType>(_data(i)), static_cast<WidestType>(other[i]));
}
}
return result;
}
RasterMetadata _meta;
data_type _data;
};
// scalar + raster: addition is commutative, so forward to the member operator+.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
DenseRaster<T> operator+(TScalar lhs, const DenseRaster<T>& rhs)
{
    return rhs + lhs;
}
// scalar - raster, element-wise. The minus_scalar_first predicate handles
// the reversed operand order and keeps nodata cells as nodata.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator-(TScalar value, const DenseRaster<T>& rhs)
{
    using ResultType = decltype(TScalar() - T());
    DenseRaster<ResultType> result(rhs.metadata());
    std::transform(begin(rhs), end(rhs), begin(result), nodata::minus_scalar_first<ResultType>(rhs.metadata().nodata, static_cast<ResultType>(value)));
    return result;
}
// scalar * raster: multiplication is commutative, so forward to the member operator*.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
DenseRaster<T> operator*(TScalar lhs, const DenseRaster<T>& rhs)
{
    return rhs * lhs;
}
// scalar / raster, element-wise, promoting to a floating point result type.
// Cells where the raster value is 0 become nodata; if the result raster has
// no nodata value configured that is reported as an error instead.
// NOTE(review): cells holding a non-zero integer nodata value are divided
// like ordinary data here — confirm whether they should be skipped as in
// the raster/raster operator/.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator/(TScalar scalar, const DenseRaster<T>& rhs)
{
    using ResultType = decltype(1.0f * T()); // float or double result
    static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
    DenseRaster<ResultType> result(rhs.metadata());
    for (std::size_t i = 0; i < rhs.size(); ++i) {
        auto value = rhs[i];
        if (value == 0) {
            if (!result.nodata().has_value()) {
                throw InvalidArgument("Division by raster that contains 0 values");
            }
            result.mark_as_nodata(i);
        } else {
            result[i] = scalar / static_cast<ResultType>(value);
        }
    }
    return result;
}
// Free-function cbegin for ADL/range use. Fixed to delegate to the member
// cbegin() like the sibling cend() below — the original returned ras.data(),
// bypassing the member iterator API.
template <typename T>
auto cbegin(const DenseRaster<T>& ras)
{
    return ras.cbegin();
}
// Free-function cend for ADL/range use; forwards to the member const iterator.
template <typename T>
auto cend(const DenseRaster<T>& ras)
{
    return ras.cend();
}
// Free-function begin over a mutable raster (ADL support for range algorithms).
template <typename T>
auto begin(DenseRaster<T>& ras)
{
    return ras.begin();
}
// Free-function begin over a const raster; the member begin() is const-qualified
// and yields a const iterator.
template <typename T>
auto begin(const DenseRaster<T>& ras)
{
    return ras.begin();
}
// Free-function end over a mutable raster (ADL support for range algorithms).
template <typename T>
auto end(DenseRaster<T>& ras)
{
    return ras.end();
}
// Free-function end over a const raster; forwards to the member cend().
template <typename T>
auto end(const DenseRaster<T>& ras)
{
    return ras.cend();
}
// Raw read-only pointer to the raster's contiguous element storage.
template <typename T>
const T* data(const DenseRaster<T>& ras)
{
    return ras.data();
}
// Raw mutable pointer to the raster's contiguous element storage.
template <typename T>
T* data(DenseRaster<T>& ras)
{
    return ras.data();
}
// Number of cells (rows * cols) in the raster.
template <typename T>
auto size(const DenseRaster<T>& ras)
{
    return ras.size();
}
}
|
e_gemm.c | #include <stdlib.h>
#include <math.h>
#include "e_gemm.h"
#include "Enclave_t.h"
/*
** 该函数只是调用了gemm_cpu()函数,并且将参数原封不动的传给gemm_cpu()
*/
/*
** SGX ecall entry point: forwards the GEMM request unchanged to gemm_cpu().
** A, B and C arrive as pointer-to-pointer (as marshalled across the enclave
** boundary) and are dereferenced once before the call.
*/
void ecall_gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float **A, int lda,
        float **B, int ldb,
        float BETA,
        float **C, int ldc)
{
    gemm_cpu(TA, TB, M, N, K, ALPHA , *A, lda, *B, ldb,BETA,*C,ldc);
}
/*
** Thin convenience wrapper: forwards all arguments unchanged to gemm_cpu(),
** which computes C = ALPHA * op(A) * op(B) + BETA * C.
*/
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu(TA, TB, M, N, K, ALPHA ,A, lda, B, ldb,BETA,C,ldc);
}
/*
** C += ALPHA * A * B for row-major A (MxK), B (KxN), C (MxN),
** neither operand transposed. The i-k-j loop order reuses one scaled A
** element across a whole row of B for sequential memory access.
*/
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        const float *a_row = A + i * lda;
        float *c_row = C + i * ldc;
        for (int k = 0; k < K; ++k) {
            const float scaled = ALPHA * a_row[k];
            const float *b_row = B + k * ldb;
            for (int j = 0; j < N; ++j) {
                c_row[j] += scaled * b_row[j];
            }
        }
    }
}
/*
** C += ALPHA * A * B' where B is stored transposed: row j of B holds column
** j of the effective operand. Each output cell is a dot product of two
** contiguous rows, accumulated locally before the single write to C.
*/
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        const float *a_row = A + i * lda;
        for (int j = 0; j < N; ++j) {
            const float *b_row = B + j * ldb;
            float acc = 0;
            for (int k = 0; k < K; ++k) {
                acc += ALPHA * a_row[k] * b_row[k];
            }
            C[i * ldc + j] += acc;
        }
    }
}
/*
** C += ALPHA * A' * B where A is stored transposed: element (i,k) of the
** effective operand lives at A[k*lda + i]. Same i-k-j ordering as gemm_nn
** so the inner loop streams through a row of B.
*/
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        float *c_row = C + i * ldc;
        for (int k = 0; k < K; ++k) {
            const float scaled = ALPHA * A[k * lda + i];
            const float *b_row = B + k * ldb;
            for (int j = 0; j < N; ++j) {
                c_row[j] += scaled * b_row[j];
            }
        }
    }
}
/*
** C += ALPHA * A' * B' with both operands stored transposed:
** A'(i,k) = A[i + k*lda] and B'(k,j) = B[k + j*ldb]. Each output cell is
** accumulated locally before the single write to C.
*/
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            const float *b_col = B + j * ldb;
            float acc = 0;
            for (int k = 0; k < K; ++k) {
                acc += ALPHA * A[i + k * lda] * b_col[k];
            }
            C[i * ldc + j] += acc;
        }
    }
}
/*
** 功能:矩阵计算,完成C = ALPHA * A * B + BETA * C矩阵计算,最后的输出为C
** 输入:
** TA,TB 是否需要对A,B做转置操作,是为1,否为0(要不要转置取决于A,B之间维度是否匹配,比如A:3*2,B:4*2,则需要对B转置,才满足矩阵乘法维度匹配规则)
** M A,C的行数(若A需要转置,则此处给出转置后的A即A'的行数,而不是转置前的)
** N B,C的列数(若B需要转置,则此处给出转置后的B即B'的列数,而不是转置前的)
** K A的列数,B的行数(同样,若A与B中的二者或者其中一个需要转置,则不管怎样,转置后的A,B必须行列能够匹配,符合矩阵乘法规则,K也是转置后的值,不是转置前的)
** A,B,C 输入矩阵(一维数组格式)
** ALPHA 系数
** BETA 系数
** lda A的列数(不做转置)或者行数(做转置,且给的是转置后A即A'的行数)
** ldb B的列数(不做转置)或者行数(做转置,且给的是转置后B即B'的行数)
** ldc C的列数
** 说明:如果TA = 0, TB = 0,那么计算的是C = ALPHA * A * B + BETA * C,此时M是A,C的行数,N是B,C的列数,K是A的列数、B的行数,lda是A的列数,ldb是B的列数;
** 如果TA = 1, TB = 0,那么计算的是C = ALPHA * A' * B + BETA * C,此时M是A’,C的行数,N是B,C的列数,K是A'的列数、B的行数,lda是A'的行数,ldb是B的列数;
** 如果TA = 0, TB = 1,那么计算的是C = ALPHA * A * B' + BETA * C,此时M是A,C的行数,N是B',C的列数,K是A的列数、B'的行数,lda是A的列数,ldb是B'的行数;
** 如果TA = 1, TB = 1,那么计算的是C = ALPHA * A' * B' + BETA * C,此时M是A’,C的行数,N是B',C的列数,K是A'的列数、B'的行数,lda是A'的行数,ldb是B'的行数;
** 总之,参与计算的矩阵必须满足矩阵行列匹配规则。比如A为2*3,B为3*2,C为2*2,那么就是第一种情况;而如果A为3*2,B为3*2,C为2*2,
** 那么就是第二种情况;如果A为2*3,B为2*3,C为2*2,对应第三种情况;如果A为2*3,B为2*3,C为2*2,对应第四种情况。
** 链接:此函数是用C实现矩阵乘法运算,这部分代码应该是模仿的Caffe中的math_functions.cpp的代码
** 参考博客:http://www.voidcn.com/blog/thy_2014/article/p-6149690.html
** 举例说明: 这个函数比较难以理解的地方在于A,B有没有转置这个问题上。首先要清楚,虽然这里A,B,C都是矩阵,但其实都是用一维数组按行保存的,
** 举个例子,假设: A = [1, 2, 3, 2, 2, 1], B = [2, 0, 1, 1, 2, 1], C = [3, 0, 1, 2] (这些输入是打死不变的,
** 都是一维数组格式),且C为2*2的矩阵,即C = [3, 0; 1, 2],那么要进行C = ALPHA * A * B + BETA * C的计算,
** 必须满足矩阵乘法行列匹配规则,则参与运算的第一个矩阵只能为2*3,第二个只能为3*2,因为A,B的元素个数已经固定为6个。
** 下面分别说明gemm_nn(),gemm_tn(),gemm_nt,gemm_tt()四个函数对该例子的计算。
** 诚如上所述,不管A, B有没有转置,反正最后参与计算的两个矩阵必须前者为2*3,后者为3*2。如果使用gemm_nn(),A,B都没有转置,
** 那么就要求没有转置的A,B分别为2*3,3*2矩阵,则 A = [ 1, 2, 3; 2, 2, 1], B = [2, 0; 1, 1; 2, 1],
** 调用gemm_nn(2, 2, 3, 1, A, 3, B, 2, C, 2)计算得到 C = [13, 5; 9, 5](其中ALPHA = BETA = 1,下同);
** 如果要用gemm_tn()函数,即A需要进行转置之后才能计算,也即转置之后的维度为2*3,而转置之前的维度为3*2,B没有转置,
** 本身就是3*2的矩阵,这样,A = [ 1, 2; 3, 2; 2, 1], A' = [1, 3, 2; 2, 2, 1], B = [2, 0; 1, 1; 2, 1],
** gemm_tn(2, 2, 3, 1, A, 2, B, 2, C, 2)函数实际计算的是A'*B+C的值,注意此时的A与gemm_nn()中的A有什么不同,
** 输入的一维数组还是[1, 2, 3, 2, 2, 1],如前所述,A是按行保存的,因为此时的A本身是一个3*2的矩阵,按照按行保存规则,
** 就是A = [ 1, 2; 3, 2; 2, 1],调用gemm_tn()的时候,M, N, K分别为2, 2, 3,都是最终参与计算的矩阵的行列数,
** 因为此处真正参与计算的是A'与B,所以M为A'的行数,即为2,N为B的列数,即为2,K为A'与B的列数,即为3,而此时lda=2,
** 是因为A进行了转置,因此输入的是A'的行数,而不是列数3,ldb=2,为B的列数,最终计算得到C=[12, 5; 9, 5]。
** 对于gemm_nt()与gemm_tt(),与上分析一样,不再赘述了。此部分注释进行了测试,对应测试文件darknet_test_gemm.c。
** 强调: 这一系列的gemm()函数,都带有叠加效果,也即最终的值是保存在C中,但这种保存并不是擦除式的保存,而是叠加式的保存,也就是说,
** 如果进入gemm()函数之前,如果C的元素已经有值了,那么这些值不会被擦除掉,而是会将其叠加,
** 其实看式子就可以看出来:此函数完成的是C = ALPHA * A * B + BETA * C矩阵运算。
**
*/
/*
** gemm_cpu: general matrix multiply on the CPU,
**     C = ALPHA * op(A) * op(B) + BETA * C
** where op(X) is X or X' depending on the TA/TB flags. M, N, K are the
** dimensions of the operands as they enter the product (op(A) is MxK,
** op(B) is KxN, C is MxN); lda/ldb/ldc are the leading dimensions of the
** row-major one-dimensional arrays holding A, B and C.
*/
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    /* First scale the existing contents of C by BETA; the gemm_xx helpers
       below then accumulate ALPHA * op(A) * op(B) on top of it. */
    #pragma omp parallel for
    for(int row = 0; row < M; ++row){
        for(int col = 0; col < N; ++col){
            C[row*ldc + col] *= BETA;
        }
    }
    /* Dispatch on the two transpose flags. */
    if(TA){
        if(TB) gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else   gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    } else {
        if(TB) gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else   gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
}
GB_unaryop__identity_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_int32
// op(A') function: GB_tran__identity_int8_int32
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
// type of the A matrix entries
#define GB_ATYPE \
    int32_t

// type of the C matrix entries
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]: load one entry of A
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// address of the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: result is the cast input, unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting from the A type (int32_t) to the C type (int8_t)
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij)): one complete load/cast/apply/store step
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int8_int32
(
    int8_t *restrict Cx,          // output array: Cx [p] = (int8_t) Ax [p]
    const int32_t *restrict Ax,   // input array of length anz
    int64_t anz,                  // number of entries in Ax and Cx
    int nthreads                  // number of OpenMP threads for the loop
)
{
    #if GB_DISABLE
    // this specialized kernel was disabled at compile time; the caller
    // falls back to the generic (non-type-specific) implementation
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel element-wise cast; static schedule since
    // every iteration does the same amount of work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (int8_t) Ax [p]  (identity op applied after the cast)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int8_int32
(
    GrB_Matrix C,                     // output: C = (int8_t) A'
    const GrB_Matrix A,               // input matrix (int32_t)
    int64_t *restrict *Rowcounts,     // per-thread row counts (phase-1 result)
    GBI_single_iterator Iter,         // iterator over the vectors of A
    const int64_t *restrict A_slice,  // how A is partitioned across threads
    int naslice                       // number of slices of A
)
{
    #if GB_DISABLE
    // specialized kernel disabled at compile time; caller uses generic path
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose loop lives in the shared template below, which
    // expands using the GB_* macros defined above for this type pair
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
blake2bp.c | /*
BLAKE2 reference source code package - optimized C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 4
/*
blake2b_init_param defaults to setting the expecting output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
/* Initialize a leaf state from a parameter block, then override the
   state's output length with the block's inner_length: leaf nodes emit
   inner (intermediate) hashes rather than the final digest. */
static int blake2bp_init_leaf_param( blake2b_state *S, const blake2b_param *P )
{
  const int rc = blake2b_init_param( S, P );
  S->outlen = P->inner_length;
  return rc;
}
/* Build the parameter block for leaf number `offset` of the depth-2,
   fanout-PARALLELISM_DEGREE BLAKE2bp tree and initialize S from it. */
static int blake2bp_init_leaf( blake2b_state *S, size_t outlen, size_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  /* Zero the free-form fields first, then set the tree parameters. */
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  P->digest_length = (uint8_t)outlen;
  P->key_length    = (uint8_t)keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;
  P->leaf_length   = 0;
  P->node_offset   = (uint32_t)offset;      /* which leaf this is */
  P->xof_length    = 0;
  P->node_depth    = 0;                     /* leaves sit at depth 0 */
  P->inner_length  = BLAKE2B_OUTBYTES;
  return blake2bp_init_leaf_param( S, P );
}
/* Build the parameter block for the single root node (depth 1, offset 0)
   of the BLAKE2bp tree and initialize S from it. */
static int blake2bp_init_root( blake2b_state *S, size_t outlen, size_t keylen )
{
  blake2b_param P[1];
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  P->digest_length = (uint8_t)outlen;
  P->key_length    = (uint8_t)keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;
  P->leaf_length   = 0;
  P->node_offset   = 0;
  P->xof_length    = 0;
  P->node_depth    = 1;                     /* root sits above the leaves */
  P->inner_length  = BLAKE2B_OUTBYTES;
  return blake2b_init_param( S, P );
}
/* Initialize an unkeyed BLAKE2bp state: one root plus
   PARALLELISM_DEGREE leaves. Returns 0 on success, -1 on bad outlen. */
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
  size_t lane;

  if( outlen == 0 || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 ) return -1;
  for( lane = 0; lane < PARALLELISM_DEGREE; ++lane )
  {
    if( blake2bp_init_leaf( S->S[lane], outlen, 0, lane ) < 0 ) return -1;
  }

  /* The root and the final leaf both carry the last_node flag. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Initialize a keyed BLAKE2bp state. Each leaf absorbs one zero-padded
   key block before any message data. Returns 0 on success, -1 on bad
   outlen/key parameters. */
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
  size_t lane;
  uint8_t block[BLAKE2B_BLOCKBYTES];

  if( outlen == 0 || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( key == NULL || keylen == 0 || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 ) return -1;
  for( lane = 0; lane < PARALLELISM_DEGREE; ++lane )
  {
    if( blake2bp_init_leaf( S->S[lane], outlen, keylen, lane ) < 0 ) return -1;
  }

  /* The root and the final leaf both carry the last_node flag. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;

  /* Feed the key, padded to a full block, into every leaf. */
  memset( block, 0, BLAKE2B_BLOCKBYTES );
  memcpy( block, key, keylen );
  for( lane = 0; lane < PARALLELISM_DEGREE; ++lane )
    blake2b_update( S->S[lane], block, BLAKE2B_BLOCKBYTES );
  secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  return 0;
}
/* Absorb inlen bytes of message. The input stream is split into
   BLAKE2B_BLOCKBYTES blocks and dealt round-robin to the
   PARALLELISM_DEGREE leaves; incomplete trailing data is kept in S->buf
   until more input arrives or the state is finalized. Always returns 0. */
int blake2bp_update( blake2bp_state *S, const void *pin, size_t inlen )
{
  const unsigned char * in = (const unsigned char *)pin;
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;
  size_t i;

  /* If buffered data plus new input covers a full stripe of
     PARALLELISM_DEGREE blocks, flush the buffer: one block per leaf. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );
    in += fill;
    inlen -= fill;
    left = 0;
  }

  /* Lane i consumes blocks i, i+4, i+8, ... of the remaining input.
     With OpenMP the lanes run as PARALLELISM_DEGREE threads; otherwise
     the same compound statement is executed as the body of the for loop. */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    /* Deliberately shadows the outer i: each thread is one lane. */
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[i], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  /* Buffer whatever did not make up a full stripe for the next call. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = left + inlen;
  return 0;
}
/* Finalize a BLAKE2bp state: flush the residual buffer into the owning
   leaves, finalize every leaf, hash the concatenated leaf digests in the
   root, and write S->outlen bytes to out. Returns the root's final result,
   or -1 if out is NULL or outlen is too small. */
int blake2bp_final( blake2bp_state *S, void *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  size_t lane;

  if( out == NULL || outlen < S->outlen )
  {
    return -1;
  }

  /* Drain the buffer: lane `lane` owns bytes
     [lane * BLAKE2B_BLOCKBYTES, (lane+1) * BLAKE2B_BLOCKBYTES). */
  for( lane = 0; lane < PARALLELISM_DEGREE; ++lane )
  {
    if( S->buflen > lane * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - lane * BLAKE2B_BLOCKBYTES;
      left = ( left > BLAKE2B_BLOCKBYTES ) ? BLAKE2B_BLOCKBYTES : left;
      blake2b_update( S->S[lane], S->buf + lane * BLAKE2B_BLOCKBYTES, left );
    }
    blake2b_final( S->S[lane], hash[lane], BLAKE2B_OUTBYTES );
  }

  /* Combine all leaf digests in the root node. */
  for( lane = 0; lane < PARALLELISM_DEGREE; ++lane )
    blake2b_update( S->R, hash[lane], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, S->outlen );
}
/* One-shot BLAKE2bp: hash inlen bytes of in (optionally keyed) into
   outlen bytes of out. Input blocks are dealt round-robin to
   PARALLELISM_DEGREE leaf states whose digests are then hashed by a
   root node. Returns 0 on success, -1 on invalid parameters. */
int blake2bp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];
  size_t i;

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if( NULL == key && keylen > 0 ) return -1;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  /* One leaf state per lane; the final leaf carries the last_node flag. */
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */

  if( keylen > 0 )
  {
    /* Each leaf absorbs one zero-padded key block before the message. */
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

  /* Lane i hashes blocks i, i+4, i+8, ... of the input. With OpenMP the
     lanes run as PARALLELISM_DEGREE threads; otherwise sequentially. */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    /* Deliberately shadows the outer i: each thread is one lane. */
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[i], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    /* Trailing partial stripe: this lane may own one final short block. */
    if( inlen__ > i * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - i * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[i], in__, len );
    }

    blake2b_final( S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  /* Root node hashes the concatenated leaf digests. */
  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; /* Mark as last node */

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen );
}
#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self-test: checks both the one-shot and the streaming
   BLAKE2bp APIs against the keyed KAT table. Prints "ok" and returns 0
   on success; prints "error" and returns -1 on the first mismatch. */
int main( void )
{
  uint8_t key[BLAKE2B_KEYBYTES];
  uint8_t buf[BLAKE2_KAT_LENGTH];
  size_t i, step;

  /* Deterministic fixtures: key = 00 01 02 ..., buf = 00 01 02 ... */
  for( i = 0; i < BLAKE2B_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;

  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;

  /* Test simple API: hash every prefix of buf, compare with the KAT. */
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2B_OUTBYTES];
    blake2bp( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES );

    if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
    {
      goto fail;
    }
  }

  /* Test streaming API: feed the same messages in chunks of every size
     from 1 up to one block; digests must match the one-shot results. */
  for(step = 1; step < BLAKE2B_BLOCKBYTES; ++step) {
    for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
      uint8_t hash[BLAKE2B_OUTBYTES];
      blake2bp_state S;
      uint8_t * p = buf;
      size_t mlen = i;
      int err = 0;

      if( (err = blake2bp_init_key(&S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES)) < 0 ) {
        goto fail;
      }

      while (mlen >= step) {
        if ( (err = blake2bp_update(&S, p, step)) < 0 ) {
          goto fail;
        }
        mlen -= step;
        p += step;
      }
      /* Final (possibly empty) chunk, then finalize. */
      if ( (err = blake2bp_update(&S, p, mlen)) < 0) {
        goto fail;
      }
      if ( (err = blake2bp_final(&S, hash, BLAKE2B_OUTBYTES)) < 0) {
        goto fail;
      }

      if (0 != memcmp(hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES)) {
        goto fail;
      }
    }
  }

  puts( "ok" );
  return 0;
fail:
  puts("error");
  return -1;
}
#endif
|
detector.c | #include "darknet.h"
/* Maps darknet's contiguous 80 class indices (0..79) to the official COCO
   category ids, which are non-contiguous (gaps at 12, 26, 29, ...). */
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
if (get_current_batch(net)+200 > net->max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time=what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time=what_time_is_it_now();
float loss = 0;
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
if(i%100==0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
/*
** Extract the numeric COCO image id from a file name such as
** ".../COCO_val2014_000000001234.jpg": the digits after the last '_',
** or, failing that, after the last '/'.
**
** Fix vs. original: if the name contains neither separator, the original
** dereferenced a NULL pointer; now the whole name is parsed instead.
*/
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    if(!p) return atoi(filename);  /* no '/' or '_' anywhere in the name */
    return atoi(p+1);
}
/* Write one COCO-format JSON result object per (box, class) pair with a
   nonzero score. Boxes arrive in center/size form and are converted to
   [x, y, width, height] with corners clamped to the w-by-h image. */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int i, j;
    int image_id = get_coco_image_id(image_path);
    for(i = 0; i < num_boxes; ++i){
        /* center/size -> corner coordinates */
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        /* clamp to the image */
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;

        for(j = 0; j < classes; ++j){
            /* coco_ids[] translates the contiguous class index j to the
               official COCO category id */
            if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
        }
    }
}
/* Write VOC-style detection records, one output file per class: each line
   is "<image id> <score> <xmin> <ymin> <xmax> <ymax>" with 1-based pixel
   coordinates clamped to the w-by-h image. */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        detection *d = dets + i;
        /* center/size -> 1-based corner coordinates */
        float left   = d->bbox.x - d->bbox.w/2. + 1;
        float right  = d->bbox.x + d->bbox.w/2. + 1;
        float top    = d->bbox.y - d->bbox.h/2. + 1;
        float bottom = d->bbox.y + d->bbox.h/2. + 1;

        /* clamp to [1, w] x [1, h] */
        if (left < 1) left = 1;
        if (top < 1) top = 1;
        if (right > w) right = w;
        if (bottom > h) bottom = h;

        for(j = 0; j < classes; ++j){
            if (d->prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, d->prob[j],
                    left, top, right, bottom);
        }
    }
}
/* Write ImageNet-style detection records to fp: one line per (box, class)
   pair with a nonzero score — "<image id> <1-based class> <score> <xmin>
   <ymin> <xmax> <ymax>" — with corners clamped to the w-by-h image. */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        /* center/size -> corner coordinates */
        float left   = dets[i].bbox.x - dets[i].bbox.w/2.;
        float right  = dets[i].bbox.x + dets[i].bbox.w/2.;
        float top    = dets[i].bbox.y - dets[i].bbox.h/2.;
        float bottom = dets[i].bbox.y + dets[i].bbox.h/2.;

        /* clamp to the image */
        if (left < 0) left = 0;
        if (top < 0) top = 0;
        if (right > w) right = w;
        if (bottom > h) bottom = h;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[j],
                    left, top, right, bottom);
        }
    }
}
/* Validate a detector with horizontal-flip test-time augmentation: each
   image and its mirror are batched together (batch = 2) and predicted in
   one pass. Results go to files in the format selected by the data cfg's
   "eval" key: COCO JSON, ImageNet txt, or one VOC-style file per class. */
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 2); /* batch of 2: image + its horizontal flip */
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        /* single JSON array of result objects */
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        /* single text file; ImageNet DET uses 200 classes */
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        /* VOC style: one result file per class */
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    /* Double-buffered prefetch pipeline with nthreads loader threads. */
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    /* Input holds both orientations: depth is doubled (net->c*2). */
    image input = make_image(net->w, net->h, net->c*2);

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.type = LETTERBOX_DATA;

    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        /* Collect the images loaded in the previous round... */
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        /* ...and immediately start loading the next round. */
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            /* Pack the image and its mirror into one two-image input. */
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);

            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        /* back up over the final ",\n" before closing the JSON array */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
/* Validate a detector over the data cfg's "valid" image list (batch = 1,
   no flip augmentation). Results go to files in the format selected by
   the cfg's "eval" key: COCO JSON, ImageNet txt, or one VOC-style file
   per class. */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        /* single JSON array of result objects */
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        /* single text file; ImageNet DET uses 200 classes */
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        /* VOC style: one result file per class */
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    /* Double-buffered prefetch pipeline with nthreads loader threads. */
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.type = LETTERBOX_DATA;

    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        /* Collect the images loaded in the previous round... */
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        /* ...and immediately start loading the next round. */
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        /* back up over the final ",\n" before closing the JSON array */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
/*
** Measure proposal recall and average IOU of a detector against a
** hard-coded test list (NOTE(review): site-specific absolute path —
** consider making it a parameter). For every ground-truth box, the best
** IOU among above-threshold detections is accumulated; a truth box counts
** as recalled when its best IOU exceeds iou_thresh.
**
** Fixes vs. original: the best-IOU loop now iterates over the nboxes
** detections actually returned by get_network_boxes() instead of
** l.w*l.h*l.n, which could read past the end of dets; the per-image
** truth boxes and detections are now freed (they leaked before).
*/
void validate_detector_recall(char *cfgfile, char *weightfile)
{
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths("/Users/yuchenwang/Documents/ButterFly/ButterFly/unittest/model/test.txt");
    char **paths = (char **)list_to_array(plist);

    int j, k;
    int m = plist->size;
    int i=0;

    float thresh = .001;
    float iou_thresh = .5;
    float nms = .4;

    int total = 0;      /* ground-truth boxes seen so far */
    int correct = 0;    /* truth boxes recalled (best IOU > iou_thresh) */
    int proposals = 0;  /* detections above the objectness threshold */
    float avg_iou = 0;

    for(i = 0; i < m; ++i){
        printf("%s\n",paths[i]);
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
        if (nms) do_nms_obj(dets, nboxes, 1, nms);

        /* Derive the label-file path from the image path. */
        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);

        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for(k = 0; k < nboxes; ++k){
            if(dets[k].objectness > thresh){
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
            float best_iou = 0;
            /* Scan only the nboxes detections returned above (the original
               iterated l.w*l.h*l.n and could read out of bounds). */
            for(k = 0; k < nboxes; ++k){
                float iou = box_iou(dets[k].bbox, t);
                if(dets[k].objectness > thresh && iou > best_iou){
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if(best_iou > iou_thresh){
                ++correct;
            }
        }

        fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
        /* Release per-image resources (the original leaked truth and dets). */
        free(truth);
        free_detections(dets, nboxes);
        free(id);
        free_image(orig);
        free_image(sized);
    }
}
/* Run a detector on a single image (filename) or interactively on paths
   read from stdin. Draws labelled boxes on the image and saves it to
   outfile (or "predictions"); with OpenCV compiled in, also shows it in
   a window. Loops until EOF when reading from stdin. */
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);

    image **alphabet = load_alphabet(); /* glyphs used to draw the labels */
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            strncpy(input, filename, 256);
        } else {
            /* interactive mode: one image path per line from stdin */
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n"); /* strip the trailing newline */
        }
        image im = load_image_color(input,0,0);
        image sized = letterbox_image(im, net->w, net->h);
        layer l = net->layers[net->n-1];

        float *X = sized.data;
        time=what_time_is_it_now();
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
        int nboxes = 0;
        /* Boxes are mapped back to the original (un-letterboxed) image. */
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
        free_detections(dets, nboxes);
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            cvNamedWindow("predictions", CV_WINDOW_NORMAL);
            if(fullscreen){
                cvSetWindowProperty("predictions", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
            }
            show_image(im, "predictions");
            cvWaitKey(0);
            cvDestroyAllWindows();
#endif
        }

        free_image(im);
        free_image(sized);
        if (filename) break; /* single-image mode: run once */
    }
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
/*
 * Dispatch the "detector" subcommand: parse the shared command-line flags,
 * then forward to train/test/valid/valid2/recall/demo.
 * argv layout: argv[2] = mode, argv[3] = data cfg, argv[4] = network cfg,
 * argv[5] = weights (optional), argv[6] = input file (optional).
 */
void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    /* argv[4] (the network cfg) is read unconditionally below, so require at
     * least 5 arguments; the old `argc < 4` check let cfg become NULL. */
    if(argc < 5){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        if(!gpus){
            fprintf(stderr, "run_detector: failed to allocate gpu list\n");
            return;
        }
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            /* advance past the next comma; the last entry has none, and the
             * old unconditional strchr(...)+1 evaluated NULL+1 (UB) */
            char *comma = strchr(gpu_list, ',');
            if(!comma) break;
            gpu_list = comma + 1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }
    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 6) ? argv[6]: 0;
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
    /* only free when we calloc'd; otherwise gpus points at the local `gpu` */
    if(gpu_list) free(gpus);
}
|
GB_unop__identity_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_uint16
// op(A') function: GB_unop_tran__identity_int64_uint16
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Identity op with a uint16 -> int64 typecast: Cx [p] = (int64_t) Ax [p],
// applied to all anz entries in parallel across nthreads OpenMP threads.
GrB_Info GB_unop_apply__identity_int64_uint16
(
    int64_t *Cx,                // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a single parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual work is performed by the included template GB_unop_transpose.c,
// which is specialized via the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_int64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // this kernel was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
rnn_helpers.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#ifdef _WIN32
#pragma warning(disable : 4267)
#endif
#include <algorithm>
#include <functional>
#include <future>
#include <string>
#include <vector>
#include "gsl/span"
#include "gsl/gsl_algorithm"
#include "core/common/common.h"
#include "core/common/logging/logging.h"
#include "core/framework/allocator.h"
#include "core/util/math.h"
#include "core/util/math_cpuonly.h"
#ifdef USE_EIGEN_THREADPOOL
#include <unsupported/Eigen/CXX11/ThreadPool>
#else
#include "core/common/task_thread_pool.h"
#endif
namespace onnxruntime {
class Tensor;
class OpKernelContext;
namespace rnn {
namespace detail {
enum Direction {
kForward = 0,
kReverse = 1,
kBidirectional = 2
};
// Map the ONNX 'direction' attribute string to the Direction enum.
// Throws (via ORT_THROW) for anything other than the three legal values.
inline Direction MakeDirection(const std::string& direction) {
  if (direction == "forward")
    return kForward;
  if (direction == "reverse")
    return kReverse;
  if (direction == "bidirectional")
    return kBidirectional;

  ORT_THROW("Invalid 'direction' argument of '", direction,
            "'. Must be one of 'forward', 'reverse', or 'bidirectional'.");
}
/** Allocate memory via `allocator` and hand back a bounds-checked span over it.
@param allocator IAllocator to use for the allocation.
@param size Allocation size. Number of elements of type TAlloc, or total size if TAlloc is 'void'.
@param unique_ptr Receives ownership of the allocation and controls its lifetime.
@param fill If true, set every element to fill_value.
@param fill_value Value used when 'fill' is true.
@returns A span providing bounds-checked access to the allocated memory.
*/
template <typename TAlloc>
gsl::span<TAlloc> Allocate(std::shared_ptr<IAllocator> allocator,
                           size_t size,
                           IAllocatorUniquePtr<TAlloc>& unique_ptr,
                           bool fill = false, TAlloc fill_value = TAlloc{}) {
  unique_ptr = IAllocator::MakeUniquePtr<TAlloc>(allocator, size);

  if (fill) {
    // Fill through the raw pointer: span iterators inhibit the compiler's
    // optimization of this loop.
    std::fill_n(unique_ptr.get(), size, fill_value);
  }

  return gsl::make_span(unique_ptr.get(), size);
}
// validate the common inputs to RNN, LSTM and GRU operators
Status ValidateCommonRnnInputs(const Tensor& X,
const Tensor& W,
const Tensor& R,
const Tensor* B,
int WRB_dim_1_multipler, // multiplier used with hidden_size for W, R and B inputs
const Tensor* sequence_lens,
const Tensor* initial_h,
int64_t num_directions,
int64_t hidden_size);
/// Copy an input array repeatedly to an output array
/// @param input_begin Beginning of input
/// @param input_end End of input
/// @param output Output iterator
/// @param repetitions Number of times to repeat copy. Assumes output is sufficiently sized.
/// @returns Position of output iterator after copy is completed
template <typename TInIter, typename TOutIter>
TOutIter RepeatVectorToConstructArray(TInIter input_begin,
                                      TInIter input_end,
                                      TOutIter output,
                                      int64_t repetitions) {
  // Lay down back-to-back copies of [input_begin, input_end); the caller
  // guarantees the destination is large enough.
  while (repetitions-- > 0) {
    output = std::copy(input_begin, input_end, output);
  }

  return output;
}
// Reverse the valid timesteps of an LSTM or GRU input, per batch entry.
// `inputs` has shape [seq_length, batch_size, hidden_size]; the result is
// written to `inputs_reverse` with shape
// [seq_length, num_directions, batch_size, hidden_size].
// For batch entry i with valid length seq_len = sequence_lengths[i]:
//   - timesteps [0, seq_len) are copied in reverse order into direction 0,
//   - padding timesteps [seq_len, max_sequence_length) are copied unreversed.
template <typename T>
void ReverseSequence(gsl::span<const T> inputs,
                     gsl::span<T> inputs_reverse,
                     gsl::span<const int> sequence_lengths,
                     const int max_sequence_length,
                     const int batch_size,
                     const int input_size,
                     const int num_directions) {
  for (int i = 0; i < batch_size; i++) {
    int seq_len = sequence_lengths[i];

#ifdef USE_OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
    for (int j = 0; j < seq_len; j++) {
      // source: timestep j of batch entry i
      gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
      // destination: timestep (seq_len - j - 1) of batch entry i; the extra
      // num_directions factor positions the row within direction 0
      gsl::span<T> dest = inputs_reverse.subspan(num_directions * (seq_len - j - 1) * batch_size * input_size + i * input_size, input_size);

      // Use gsl::copy instead of std::copy() to allow compiler to optimize the code
      gsl::copy(src, dest);
    }

#ifdef USE_OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
    for (int j = seq_len; j < max_sequence_length; j++) {
      // padding timesteps keep their original timestep index
      gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
      gsl::span<T> dest = inputs_reverse.subspan(num_directions * j * batch_size * input_size + i * input_size, input_size);

      // Use gsl::copy instead of std::copy() to allow compiler to optimize the code
      gsl::copy(src, dest);
    }
  }
}
// C = alpha * A * B' + beta * C.
// A has size M x K, B has size N x K (transposed), and C has size M x N.
// Each matrix is passed as an [iterator, end) pair plus its leading
// dimension (row stride), which may exceed the logical column count.
// We check that A, B and C are large enough before calling the lower level GEMM implementation.
template <typename TSpanAIter, typename TSpanBIter, typename TSpanCIter>
void ComputeGemm(const int M,
                 const int N,
                 const int K,
                 const float alpha,
                 TSpanAIter A,
                 TSpanAIter A_end,
                 const int lda,
                 TSpanBIter B,
                 TSpanBIter B_end,
                 const int ldb,
                 const float beta,
                 TSpanCIter C,
                 TSpanCIter C_end,
                 const int ldc) {
  // validate all the inputs
  // need to use the lda/ldb/ldc strides which should be >= the columns for the span
  ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N);
  // the last row starts at (rows - 1) * stride and only needs its K (or N)
  // logical columns, hence the "- (stride - cols)" adjustment
  ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end);
  ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end);
  ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end);

  ::onnxruntime::math::GemmEx<float, CPUMathUtil>(
      CblasNoTrans, CblasTrans,
      M, N, K, alpha,
      &*A, lda,
      &*B, ldb, beta,
      &*C, ldc, &CPUMathUtil::Instance());
}
// helper to convert a span const_iterator to a raw pointer
// after validating that [cur, cur + size) stays within [cur, end)
template <typename T>
const T* SafeRawConstPointer(typename gsl::span<T>::const_iterator cur,
                             typename gsl::span<T>::const_iterator end,
                             size_t size) {
  ORT_ENFORCE(cur + size <= end);
  return &*cur;
}
// helper to convert a span to a raw const pointer at `offset`
// after validating that [offset, offset + size) lies within the span.
// BUGFIX: this overload previously returned span.data(), silently ignoring
// `offset`, unlike the mutable SafeRawPointer overload which returns
// span.data() + offset.
template <typename T>
const T* SafeRawConstPointer(gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  return span.data() + offset;
}
// helper to convert a span iterator to a raw (mutable) pointer
// after validating that [cur, cur + size) stays within [cur, end)
template <typename T>
T* SafeRawPointer(typename gsl::span<T>::iterator cur,
                  typename gsl::span<T>::iterator end,
                  size_t size) {
  ORT_ENFORCE(cur + size <= end);
  return &*cur;
}
// helper to convert a span to a raw (mutable) pointer at `offset`
// after validating that [offset, offset + size) lies within the span
template <typename T>
T* SafeRawPointer(typename gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  return span.data() + offset;
}
// Run lambda(i) for i = 0, step, 2*step, ... < max on the given thread pool,
// and block until every invocation has completed.
template <typename TLambda>
void ExecuteLambdaInParallel(const std::string& name, TLambda lambda, int max, int step,
#ifdef USE_EIGEN_THREADPOOL
                             Eigen::NonBlockingThreadPool& ttp,
#else
                             TaskThreadPool& ttp,
#endif
                             const ::onnxruntime::logging::Logger& logger) {
// #define NOTHREADS to execute the lambdas directly and in order if you need to do that to debug
#ifdef NOTHREADS
  ORT_UNUSED_PARAMETER(ttp);
  ORT_UNUSED_PARAMETER(logger);

  for (int i = 0; i < max; i += step) {
    (void)name;
    std::bind(lambda, i)();
  }
#else
#ifdef USE_EIGEN_THREADPOOL
  ORT_UNUSED_PARAMETER(name);
  ORT_UNUSED_PARAMETER(logger);

  // completion counter, incremented by each task as it finishes
  std::atomic<int> done(0);
  for (int i = 0; i < max; i += step) {
    ttp.Schedule([lambda, i, &done]() {
      lambda(i);
      ++done;
    });
  }

  // number of scheduled tasks: ceil(max / step), guarded against step == 0
  int totalTasks = (int)max / (step > 0 ? step : 1) + (max % step > 0 ? 1 : 0);
  // NOTE(review): busy-wait spin until every task reports done; burns a core
  // while waiting — consider a condition variable if this shows up in profiles
  while (done != totalTasks) {
  }
#else
  std::vector<std::future<void> > task_results{};
  // NOTE(review): max / step is integer division, so std::ceil is a no-op
  // here and the reserve may be one element short — harmless, as reserve is
  // only a capacity hint
  task_results.reserve(static_cast<size_t>(std::ceil(max / step)));

  for (int i = 0; i < max; i += step) {
    std::packaged_task<void()> task{std::bind(lambda, i)};
    task_results.emplace_back(task.get_future());
    ttp.RunTask(std::move(task));
  }

  try {
    // wait for all and propagate any exceptions
    for (auto& future : task_results)
      future.get();
  } catch (const std::exception& ex) {
    LOGS(logger, ERROR) << name << " - exception running tasks: " << ex.what();
    throw;
  }
#endif  // else part of #ifdef USE_EIGEN_THREADPOOLs
#endif  // else part of #ifdef NOTHREADS
}
void DumpMatrixImpl(const std::string& name, const float* src, int row, int col,
int offset = 0, int col_width = -1);
// Helper class to wrap the processing of the activation funcs and any alpha/beta values.
// The alpha/beta values are consumed in the order of the activation funcs. once they run out
// defaults will be used as needed.
// The Entries property contains the normalized function names and the alpha/beta value to use.
class ActivationFuncs {
 public:
  // One activation function, paired with the alpha/beta values resolved for it.
  struct Entry {
    const std::string name;
    const float alpha;
    const float beta;
  };

  ActivationFuncs() = default;

  // Builds the entries from parallel vectors of names, alphas and betas;
  // defined in the corresponding translation unit.
  ActivationFuncs(const std::vector<std::string>& funcs,
                  const std::vector<float>& alphas,
                  const std::vector<float>& betas);

  // Normalized function names plus their alpha/beta values, in input order.
  const std::vector<Entry>& Entries() const {
    return entries_;
  }

 private:
  std::vector<Entry> entries_;
};
namespace deepcpu {
using AddBiasIntoFuncPtr = void (*)(const float*, float*, const int);
using ClipWithBiasFuncPtr = void (*)(const float, const float*, float*, const int);
using ActivationFuncPtr = void (*)(float*, const int, const float, const float);
using ActivationFuncBPtr = void (*)(const float*, float*, const int, const float, const float);
using LstmMergeGatesFuncPtr = void (*)(const float*, float*, const float*, float*, const int, const float, const float);
using GruResetGateFuncPtr = void (*)(const float*, float*, float*, const int, const float, const float);
using GruOutputGateFuncPtr = void (*)(float*, const float*, const float*, float*, const int, const float, const float);
ActivationFuncPtr ActivationFuncByName(const std::string& func);
LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func);
GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func);
GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func);
void add_bias_into_ignore(const float* ignored, float* pd, const int c);
void add_bias_into(const float* ps, float* pd, const int c);
void clip(const float b, float* pd, const int c);
void clip_add_bias(const float b, const float* pb, float* pd, const int c);
void clip_ignore_bias(const float b, const float* pb, float* pd, const int c);
void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void relu_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void sigmoid_exact_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void tanh_exact_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void sigmoid(float* pd, int c, const float alpha, const float beta);
void tanh(float* pd, int c, const float alpha, const float beta);
void relu(float* pd, int c, const float alpha, const float beta);
void sigmoid_exact(float* pd, int c, const float alpha, const float beta);
void tanh_exact(float* pd, int c, const float alpha, const float beta);
void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float* pf, const float* pg, float* pcurr, const int c);
void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, const int c, const float alpha, const float beta);
void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, const int c, const float alpha, const float beta);
void gru_reset_gate_relu(const float* ps1, float* ps2, float* pd, const int c, const float alpha, const float beta);
void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, const int c, const float alpha, const float beta);
void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, const int c, const float alpha, const float beta);
void gru_output_gate_relu(float* ph, const float* pz, const float* ps, float* po, const int c, const float alpha, const float beta);
// Multiply-accumulate: dest[i] += op1[i] * op2[i] for each of `size` elements.
// Note that dest is accumulated into, not overwritten.
inline void elementwise_product(const float* op1, const float* op2, float* dest, const int size) {
  for (int idx = 0; idx < size; ++idx) {
    dest[idx] += op1[idx] * op2[idx];
  }
}
// Accumulate: dest[i] += src[i] for each of `size` elements.
inline void elementwise_sum1(const float* src, float* dest, const int size) {
  for (int idx = 0; idx < size; ++idx) {
    dest[idx] += src[idx];
  }
}
// Accumulate two sources at once: dest[i] += src1[i] + src2[i].
inline void elementwise_sum2(const float* src1, const float* src2, float* dest, const int size) {
  for (int idx = 0; idx < size; ++idx) {
    dest[idx] += src1[idx] + src2[idx];
  }
}
} // namespace deepcpu
} // namespace detail
} // namespace rnn
} // namespace onnxruntime
|
spmmd_x_bsr_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/**
 * Dense-result product of two BSR sparse matrices: matC = matA * matB.
 * matC is a dense row-major array with leading dimension ldc.
 * Returns ALPHA_SPARSE_STATUS_INVALID_VALUE if the operand shapes, block
 * layouts, or block sizes are incompatible; ALPHA_SPARSE_STATUS_SUCCESS
 * otherwise.
 */
alphasparse_status_t ONAME(const ALPHA_SPMAT_BSR *matA, const ALPHA_SPMAT_BSR *matB, ALPHA_Number *matC, const ALPHA_INT ldc)
{
    if (matA->cols != matB->rows || ldc < matB->cols)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    if (matA->block_layout != matB->block_layout)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    if (matA->block_size != matB->block_size)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT bs = matA->block_size;
    const ALPHA_INT m = matA->rows * bs; // rows of C, in scalar elements
    const ALPHA_INT n = matB->cols * bs; // cols of C, in scalar elements
    int num_thread = alpha_get_thread_num();

    // zero-initialize C
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT i = 0; i < m; i++)
        for (ALPHA_INT j = 0; j < n; j++)
        {
            alpha_setzero(matC[index2(i, j, ldc)]);
        }

    ALPHA_INT A_block_rows = matA->rows;

    // multiply: C += A * B, one block-row of A per parallel task; rows of C
    // touched by different ar values are disjoint, so no synchronization is
    // needed.
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT ar = 0; ar < A_block_rows; ar++)
    {
        for (ALPHA_INT ai = matA->rows_start[ar]; ai < matA->rows_end[ar]; ai++)
        {
            // block-column of A == block-row of B
            ALPHA_INT br = matA->col_indx[ai];
            for (ALPHA_INT bi = matB->rows_start[br]; bi < matB->rows_end[br]; bi++)
            {
                ALPHA_INT bc = matB->col_indx[bi]; // block-column of B
                if (matA->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
                {
                    // row-major blocks; block_ac doubles as the row index
                    // inside B's block
                    for (ALPHA_INT block_ar = 0; block_ar < bs; block_ar++)
                        for (ALPHA_INT block_ac = 0; block_ac < bs; block_ac++)
                            for (ALPHA_INT block_bc = 0; block_bc < bs; block_bc++)
                            {
                                ALPHA_Number av = matA->values[bs * bs * ai + bs * block_ar + block_ac];
                                ALPHA_Number bv = matB->values[bs * bs * bi + bs * block_ac + block_bc];
                                alpha_madde(matC[index2(ar * bs + block_ar, bc * bs + block_bc, ldc)], av, bv);
                            }
                }
                else
                {
                    // column-major blocks: element (r,c) lives at bs*c + r
                    for (ALPHA_INT block_ar = 0; block_ar < bs; block_ar++)
                        for (ALPHA_INT block_ac = 0; block_ac < bs; block_ac++)
                            for (ALPHA_INT block_bc = 0; block_bc < bs; block_bc++)
                            {
                                ALPHA_Number av = matA->values[bs * bs * ai + bs * block_ac + block_ar];
                                ALPHA_Number bv = matB->values[bs * bs * bi + bs * block_bc + block_ac];
                                alpha_madde(matC[index2(ar * bs + block_ar, bc * bs + block_bc, ldc)], av, bv);
                            }
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdint.h>
#include <limits.h> // for int limits etc.
#include <assert.h>
#include <time.h>
#include <gsl/gsl_histogram.h>
#include <omp.h>
#include "main.h"
#include "dSFMT-src-2.1/dSFMT.h"
//#include "ziggurat/ziggurat.h"
//#define STATS
#define MULTIPLIER 1000000 // seperates the coordiantes (poor man's hash)
/**
 * Random walk of num_particles particles on a cubic lattice of side box_size
 * containing num_defects randomly placed trapping defects, simulated for
 * nsteps steps. The per-step total "magnetization" (sum of residual trapping
 * times) is accumulated in mags[] and dumped as raw binary.
 *
 * Usage: rw #particles #defects #box #steps (defaults are used otherwise).
 *
 * Fixes in this revision:
 *  - direction was taken as (int)dir_pool[j] with dir_pool[j] in [0,1),
 *    which truncates to 0 on every step; it is now scaled by 6 to select
 *    one of the 6 lattice directions, matching the non-pooled reference
 *    code path that used dsfmt_genrand_close_open(&dsfmt)*6.
 *  - dir_pool was allocated once and handed to all OpenMP threads via
 *    firstprivate, which copies only the pointer — a data race; each
 *    thread now owns its pool.
 *  - the batch loop no longer reads past the particles array when
 *    num_particles is not a multiple of DSFMT_N64.
 *  - the step loop counter was an int while nsteps is long long.
 *  - dead commented-out "Method 2" experiment code removed.
 */
int main (int argc, char * argv[]) {
    int num_particles = DSFMT_N64*4;
    int num_defects = 1024*4;
    int box_size = 128;
    long long nsteps = 1024*1024*2;

    if (argc == 5) {
        num_particles = atoi(argv[1]);
        num_defects = atoi(argv[2]);
        box_size = atoi(argv[3]);
        nsteps = atoll(argv[4]);
    }
    else {
        printf("\n# ***** Using default values! *****\n\n");
        printf("# usage: rw #particles #defects #box #steps\n");
    }

    // Information:
    printf("#-------------------- Paramters --------------------\n");
    printf("# Particles: %i\n", num_particles);
    printf("# Box size: %i\n", box_size);
    printf("# Defects: %i (Density: %.2e)\n", num_defects, (float) num_defects/pow(box_size,3) );
    printf("# Steps: %lli\n", nsteps);
    printf("#---------------------------------------------------\n");

    assert( (box_size & (box_size - 1)) == 0 ); // box_size must be a power of two

    dsfmt_t dsfmt;
    int seed = 1;
    int **particles = malloc2D_i(num_particles, 4); // per particle: x, y, z, residual trapping time
    int **defects = malloc2D_i(num_defects, 3);     // defect coordinates: x, y, z

    // init random number generator
    dsfmt_init_gen_rand(&dsfmt, seed);

    assert(num_defects < pow(box_size,3)); // cannot place more defects than lattice sites

#ifdef STATS
    // statistics
    // histogram of diretions
    gsl_histogram * h = gsl_histogram_alloc (6);
    gsl_histogram_set_ranges_uniform (h, 0, 6);
    // histogram of visits
    gsl_histogram * hvisits = gsl_histogram_alloc (10000);
    gsl_histogram_set_ranges_uniform (hvisits, 0, 10000);
    // gsl histogram seems not to be thread safe, allow only 1 thread
    omp_set_num_threads(1);
#endif

    // distribute particles uniformly in [0, box_size)
    for (int j = 0; j < 3; j++) {
        for (int i = 0; i < num_particles; i++) {
            particles[i][j] = (int) (dsfmt_genrand_close_open(&dsfmt)*box_size);
        }
    }
    // distribute defects uniformly in [0, box_size)
    for (int j = 0; j < 3; j++) {
        for (int i = 0; i < num_defects; i++) {
            defects[i][j] = (int) (dsfmt_genrand_close_open(&dsfmt)*box_size);
        }
    }

    // METHOD 1: sorted list of coordinate hashes (fallback defect lookup)
    int64_t *hash_list = malloc( num_defects*sizeof(int64_t) );
    if (hash_list == NULL) {
        fprintf(stderr, "Could not allocate memory for hash list\n");
        return EXIT_FAILURE;
    }
    for (int i = 0; i < num_defects; i++) {
        hash_list[i] = hash(defects[i][0],defects[i][1],defects[i][2]);
    }
    qsort(hash_list, num_defects, sizeof(int64_t), int64_cmp);

    // METHOD 3: one bit table per axis; a site can only contain a defect if
    // its x, y and z coordinates are each flagged.
    // NOTE(review): the packing divides by sizeof(int) (bytes, usually 4)
    // rather than the bit width of int, so only 4 flags live in each int.
    // Set and test sides agree, so this is correct — just ~8x larger than
    // strictly necessary.
    int ltb_N = (int) ceil( ( (double)box_size ) / sizeof(int));
    int xltb[ltb_N];
    int yltb[ltb_N];
    int zltb[ltb_N];
    for (int i = 0; i < ltb_N; i++) {
        xltb[i]=0;
        yltb[i]=0;
        zltb[i]=0;
    }
    for (int i = 0; i < num_defects; i++) {
        int xi = defects[i][0] / sizeof(int);
        int xbit = defects[i][0] % sizeof(int);
        xltb[xi] |= 1 << xbit;
        int yi = defects[i][1] / sizeof(int);
        int ybit = defects[i][1] % sizeof(int);
        yltb[yi] |= 1 << ybit;
        int zi = defects[i][2] / sizeof(int);
        int zbit = defects[i][2] % sizeof(int);
        zltb[zi] |= 1 << zbit;
    }

    printf("# Lookup table size M2: %10.1fkB %i %i\n", 3*ltb_N*sizeof(int) / ((float) (1024)), num_defects, box_size);

    // sanity check: every defect coordinate must be flagged in the bit tables
    for (int i = 0; i < num_defects; i++) {
        int x = defects[i][0];
        int y = defects[i][1];
        int z = defects[i][2];
        int xi = x / sizeof(int);
        int xbit = x % sizeof(int);
        assert( (xltb[xi] & (1<<xbit)) != 0 );
        int yi = y / sizeof(int);
        int ybit = y % sizeof(int);
        assert( (yltb[yi] & (1<<ybit)) != 0 );
        int zi = z / sizeof(int);
        int zbit = z % sizeof(int);
        assert( (zltb[zi] & (1<<zbit)) != 0 );
    }

    /******************************* loop *********************************/
    // particles form the outer loop so each thread keeps private RNG state
    printf("\n# Starting ...\n");

    int *mags = malloc(nsteps * sizeof(int)); // total magnetization per step
    if (mags == NULL) {
        fprintf(stderr, "Could not allocate memory for magnetization\n");
        return EXIT_FAILURE;
    }
    for (long long i = 0; i < nsteps; i++) {
        mags[i] = 0;
    }

    double calc_time=0;
    printf("MinSize: %i\n", DSFMT_N64);

#pragma omp parallel for reduction(+:calc_time)
    for (int i = 0; i < num_particles; i+=DSFMT_N64) {
        // every thread gets its own RNG, seeded by the batch start index
        dsfmt_t dsfmt;
        dsfmt_init_gen_rand(&dsfmt, i);
        // each thread owns its direction pool (sharing one buffer via
        // firstprivate copied only the pointer and raced between threads)
        double *dir_pool = malloc(DSFMT_N64 * sizeof(double));
        if (dir_pool == NULL) continue;
        // the last batch may be short when num_particles % DSFMT_N64 != 0
        int batch = (i + DSFMT_N64 <= num_particles) ? DSFMT_N64 : num_particles - i;
        // loop over steps
        for (long long step = 0; step < nsteps; step++) {
            // [0,1) random numbers; dsfmt_fill_array_* requires an even array
            // size >= DSFMT_N64, which DSFMT_N64 itself satisfies
            dsfmt_fill_array_close_open(&dsfmt, dir_pool, DSFMT_N64);
            double start = omp_get_wtime();
            // doing batches of particles
            for (int j = 0; j < batch; j++) {
                int* particle = particles[i+j];
                // scale [0,1) onto the 6 lattice directions {0,...,5}
                // (previously the raw double was truncated, always giving 0)
                int direction = (int) (dir_pool[j] * 6);
                // only move particles which have not met a defect yet (== 0)
                if (particle[3] == 0) {
                    // random step
                    move_particle(particle, direction);
                    // obey periodic boundary conditions, i.e. fold back
                    check_pbc(particle, box_size);
                    int tc = 10; // trapping time assigned when a defect is hit
                    check_defect_ltb(particle, xltb, yltb, zltb , tc);
                }
                else { // particle is trapped, decrease the residual waiting time
                    particle[3] -= 1;
                }
#pragma omp atomic
                mags[step] += particle[3];
            } // end sub particle loop
            double stop = omp_get_wtime();
            calc_time += (stop-start);
        } // end steps loop
        free(dir_pool);
    } // end particle loop

    printf("Speed: %.2e s/particle \n", calc_time/num_particles);

    // dump the per-step magnetization as raw binary
    char fname[32];
    sprintf(fname, "binout.om%i",0);
    FILE *outFile = fopen(fname, "w");
    if (outFile == NULL) {
        fprintf(stderr, "Could not open %s for writing\n", fname);
    } else {
        // use fwrite to write binary data to the file
        fwrite(mags, sizeof(mags[0]), nsteps, outFile);
        fclose(outFile);
    }

    print_array(particles, 10, 3);

    free(mags);
    free(hash_list);
    free2D_i(particles);
    free2D_i(defects);

#ifdef STATS
    printf("Directions drawn:\n");
    gsl_histogram_fprintf (stdout, h, "%g", "%g");
    printf("\n");
    gsl_histogram_free (h);
    gsl_histogram_free (hvisits);
#endif
    return 0;
}
/***************************************************************************************************************/
/* qsort C-string comparison function */
/* qsort C-string comparison function.
 *
 * qsort() hands the comparator pointers to the ARRAY ELEMENTS.  For an
 * array of char* (the string hash list below), each element is itself a
 * char*, so a/b are really char** and must be dereferenced once before
 * calling strcmp().  The previous code cast a/b directly to const char*,
 * which ran strcmp over the pointer bytes instead of the strings.
 */
int cstring_cmp(const void *a, const void *b)
{
    const char * const *ia = (const char * const *)a;
    const char * const *ib = (const char * const *)b;
    return strcmp(*ia, *ib);
}
/* asm long long comparison function */
/*
int asm64_comp(const void *a, const void *b) {
int i=0;
__asm__(
"mov (%%rdi), %%rdx\n\t" // Subtract low word
"sub (%%rsi), %%rdx\n\t"
"mov 8(%%rdi), %%rdi\n\t" // Subtract high word
"sbb 8(%%rsi), %%rdi\n\t"
"sbb %%eax, %%eax\n\t" // %eax = -1 if below, zero otherwise
"or %%rdx, %%rdi\n\t" // %rdi is non-zero if comparison is non-zero
"neg %%rdi\n\t" // carry flag is 1 if comparison is non-zero
"adc %%eax, %%eax\n\t" // Result in %eax
"movl %%eax, %0\n\t"
: "=a" (i)
:"r" (a), "r" (b)
);
return i;
}
*/
/* qsort comparison function for int64_t values.
 * Returns -1, 0 or 1 as *a is less than, equal to or greater than *b. */
int int64_cmp(const void *a, const void *b)
{
    const int64_t *lhs = (const int64_t *)a;
    const int64_t *rhs = (const int64_t *)b;
    if (*lhs < *rhs)
        return -1;
    if (*lhs > *rhs)
        return 1;
    return 0;
}
/* qsort int comparison function */
/* qsort comparison function for int values.
 *
 * The previous implementation returned *a - *b, which overflows (UB) for
 * operands of opposite sign with large magnitude (e.g. INT_MAX vs -1).
 * Explicit comparisons are overflow-safe and yield the same ordering.
 */
int int_cmp(const void *a, const void *b)
{
    int ia = *(const int *)a;
    int ib = *(const int *)b;
    return (ia > ib) - (ia < ib);
}
/* Combine (x, y, z) lattice coordinates into one 64-bit key using a
 * base-MULTIPLIER positional encoding: x*M^2 + y*M + z.  Keys are unique
 * as long as each coordinate lies in [0, MULTIPLIER). */
static inline int64_t hash(int x, int y, int z) {
    const int64_t m = (int64_t) MULTIPLIER;
    return ((int64_t) x * m + (int64_t) y) * m + (int64_t) z;
}
/* Allocate a zero-initialised nrows x ncolumns 2-D int array.
 *
 * The payload is a single contiguous block anchored at array[0]; the
 * other row pointers are views into it, so releasing the array means
 * freeing array[0] and then the pointer vector (see free2D_i()).
 *
 * Returns NULL on allocation failure.  The old version only printed a
 * message on inner-malloc failure and then dereferenced the NULL result.
 * calloc() zero-fills, replacing the explicit clearing loops, and checks
 * nrows*ncolumns for overflow.
 */
int** malloc2D_i(long nrows, long ncolumns){
    int **array = malloc(nrows * sizeof(int *));
    if (array == NULL) {
        fprintf(stderr, "Could not allocate memory\n");
        return NULL;
    }
    array[0] = calloc((size_t)nrows * ncolumns, sizeof(int));
    if (array[0] == NULL) {
        fprintf(stderr, "Could not allocate memory\n");
        free(array);
        return NULL;
    }
    for (long i = 1; i < nrows; i++)
        array[i] = array[0] + i * ncolumns;
    return array;
}
/*
char** malloc2D_char(long nrows, long ncolumns){
char **array = malloc(nrows * sizeof(char *));
array[0] = malloc(nrows * ncolumns * sizeof(char));
if (array[0] == NULL) printf("Could not allocate memory");
for(int i = 1; i < nrows; i++)
array[i] = array[0] + i * ncolumns;
return array;
}
*/
/* Allocate an nrows x ncolumns 2-D char array, one malloc per row
 * (contents uninitialised).  Returns NULL on failure; on a partial
 * failure all rows allocated so far are released first.
 *
 * NOTE(review): unlike malloc2D_i(), rows here are NOT contiguous, so
 * array[0] does not own the whole payload -- each row must be freed
 * individually.  Confirm callers free accordingly.
 */
char** malloc2D_char(long nrows, long ncolumns){
    char **array = malloc(nrows * sizeof(char *));
    if (array == NULL)
        return NULL;
    for (long i = 0; i < nrows; i++) {
        array[i] = malloc(ncolumns * sizeof(char));
        if (array[i] == NULL) {
            for (long k = 0; k < i; k++)
                free(array[k]);
            free(array);
            return NULL;
        }
    }
    return array;
}
/* Release a 2-D int array created by malloc2D_i().
 *
 * malloc2D_i() performs exactly two allocations: the row-pointer vector
 * and one contiguous payload block anchored at array[0].  The previous
 * version freed only the pointer vector, leaking the (much larger)
 * payload on every call.  NULL input is a no-op.
 */
void free2D_i(int** array) {
    if (array == NULL)
        return;
    free(array[0]);   /* contiguous payload block */
    free(array);      /* row-pointer vector */
}
/* Print an nrows x ncolumns int array, one space-separated row per line.
 *
 * The old version printed array[i][ncolumns-1] unconditionally, reading
 * out of bounds when ncolumns == 0; every column is now handled by one
 * uniform loop (output is byte-identical for ncolumns >= 1).
 */
void print_array(int **array, int nrows, int ncolumns){
    for (int i = 0; i < nrows; i++) {
        for (int j = 0; j < ncolumns; j++) {
            if (j > 0)
                printf(" ");
            printf("%i", array[i][j]);
        }
        printf("\n");
    }
}
/* Apply one unit step to the particle along the axis encoded in
 * `direction`: 0/1 -> +/-x, 2/3 -> +/-y, 4/5 -> +/-z.
 * Any other direction value leaves the particle untouched (matching the
 * original switch, which had no default case). */
void move_particle(int *particle, int direction){
    if (direction < 0 || direction > 5)
        return;
    int axis = direction / 2;                   /* 0 = x, 1 = y, 2 = z */
    int sign = (direction % 2 == 0) ? 1 : -1;   /* even = +, odd = -  */
    particle[axis] += sign;
}
/* Fold each coordinate back into [0, box_size) by bit-masking.
 * Valid only when box_size is a power of two: x & (box_size-1) then acts
 * as a true mathematical modulo even for negative x (two's complement),
 * which the C % operator (a remainder) does not provide. */
static inline void check_pbc(int* particle, int box_size) {
    const int mask = box_size - 1;
    for (int axis = 0; axis < 3; axis++)
        particle[axis] &= mask;
}
/* binary search */
/* Binary-search variant: trap the particle (set its waiting time to
 * correlation_time) if its position hashes to an entry of the defect
 * hash list.  hash_list must be sorted ascending, since it is searched
 * with bsearch(). */
void check_defect(int* particle, int correlation_time, int64_t* hash_list, int num_defects ){
    int64_t hash_val = hash(particle[0], particle[1], particle[2]);
    /* bsearch returns a pointer into the int64_t array; the old code
     * stored it in an int*, which was misleading (and a strict-aliasing
     * hazard had it ever been dereferenced). */
    const int64_t *hit = bsearch(&hash_val, hash_list, num_defects,
                                 sizeof(int64_t), int64_cmp);
    if (hit != NULL)
        particle[3] = correlation_time;
}
/* lookup table 1 */
/* Dense lookup-table variant: hash the position, subtract the smallest
 * defect hash and, if the resulting offset falls inside the table span,
 * test the per-site byte flag (1 = defect). */
void check_defect_hash(int* particle, int correlation_time, int64_t hash_list_min, int64_t span, char* defekt_ltb){
    int64_t key = hash(particle[0], particle[1], particle[2]);
    int64_t idx = key - hash_list_min;
    if (idx < 0 || idx >= span)
        return;
    if (defekt_ltb[idx] == 1)
        particle[3] = correlation_time;
}
/* check Method 3 */
/* 3-D pointer-table variant: lookup[x][y][z] == 1 flags a defect site.
 * A NULL plane or row pointer means "no defect anywhere below it", so we
 * bail out early on either. */
void check_defect_3d(int* particle, char*** lookup, int correlation_time){
    char **plane = lookup[particle[0]];
    if (plane == NULL)
        return;
    char *row = plane[particle[1]];
    if (row == NULL)
        return;
    if (row[particle[2]] == 1)
        particle[3] = correlation_time;
}
/* Per-axis bit-table variant: x, y and z are bitmaps with one bit per
 * lattice coordinate; a site is a defect when its bit is set in all
 * three maps.
 *
 * Packing scheme (must match the code that builds the tables):
 *   word index = coord / sizeof(int), bit = coord % sizeof(int).
 * NOTE(review): sizeof(int) counts BYTES (typically 4), not bits, so
 * only 4 coordinates are packed per int -- wasteful but internally
 * consistent; confirm against the table builder before changing this to
 * CHAR_BIT * sizeof(int).
 *
 * Bug fix: the old code computed the mask `bit = 1 << (coord % ...)` and
 * then tested `word & (1 << bit)`, shifting a SECOND time and therefore
 * testing the wrong bit.  (The previously commented-out switch version
 * of this routine used the intended single shift.)  We now test
 * `word & mask` directly.
 */
void check_defect_ltb(int* particle, int* x, int* y, int* z, int correlation_time){
    int i = particle[0] / sizeof(int);
    int mask = 1 << (particle[0] % sizeof(int));
    if ((x[i] & mask) != 0) {
        i = particle[1] / sizeof(int);
        mask = 1 << (particle[1] % sizeof(int));
        if ((y[i] & mask) != 0) {
            i = particle[2] / sizeof(int);
            mask = 1 << (particle[2] % sizeof(int));
            if ((z[i] & mask) != 0) {
                particle[3] = correlation_time;
            }
        }
    }
}
/*
void check_defect(int* particle, int correlation_time, int64_t* hash_list, int num_defects ){
int64_t hash_val;
hash_val = hash(particle[0],particle[1],particle[2]);
int bsearch = 0;
int left = 0;
int right = num_defects-1;
while (bsearch == 0 && left <= right) {
// int middle = (left + right) / 2;
// better: avoid integer overflow
int middle = left + (right-left) / 2;
if (hash_val == hash_list[middle]) {
bsearch = 1;
particle[3] = correlation_time;
}
else {
if (hash_val < hash_list[middle]) right = middle - 1;
if (hash_val > hash_list[middle]) left = middle + 1;
}
}
}
*/
// REFERENCE METHOD
/* Reference (brute-force) defect check: compare the particle position
 * against every defect coordinate triple.  On the first full (x,y,z)
 * match, assign a random trapping time and stop scanning.  Equivalent to
 * the original's nested loop with per-coordinate break/else logic. */
void ref_check_defect(int* particle, int** defect_coords, int num_defects){
    for (int d = 0; d < num_defects; d++) {
        if (particle[0] == defect_coords[d][0] &&
            particle[1] == defect_coords[d][1] &&
            particle[2] == defect_coords[d][2]) {
            particle[3] = (int) (rexp() * 3);  /* random waiting time */
            return;
        }
    }
}
/*
void HT_check_defect(int* particle, Fnv64_t* hash_list, int num_defects ){
int* pItem;
char hash_string[18];
Fnv64_t hash_val;
snprintf(hash_string, sizeof(hash_string), "%5i %5i %5i", particle[0], particle[1], particle[2]);
hash_val = fnv_64_str(hash_string, FNV0_64_INIT);
pItem = (int*) bsearch (&hash_val, hash_list, num_defects, sizeof (Fnv64_t), fnv64_cmp);
if (pItem != 0) particle[3] = 1; // set scalar value
}
*/
/*
// now create a hashed list to find them easier (hopefully)
Fnv64_t hash_val;
Fnv64_t *hash_list = malloc( num_defects*sizeof(Fnv64_t) );
for (int i = 0; i < num_defects; i++) {
snprintf(hash_string, sizeof(hash_string), "%5i %5i %5i", defects[i][0], defects[i][1], defects[i][2]);
hash_val = fnv_64_str(hash_string, FNV1_64_INIT);
hash_list[i] = hash_val;
}
qsort(hash_list, num_defects,sizeof(Fnv64_t), fnv64_cmp);
*/
/* mit chars
char **hash_list = malloc2D_char(num_defects, 18);
//char hash_list[20][18];
for (int i = 0; i < num_defects; i++) {
snprintf(hash_string, sizeof(hash_string), "%5i %5i %5i", defects[i][0], defects[i][1], defects[i][2]);
hash_list[i] = hash_string;
printf("%03i %s\n",i, hash_list[i]);
}
printf("Sorted\n");
qsort(hash_list, num_defects, sizeof(hash_list[0]), cstring_cmp);//cmpstring_up);
*/
|
resample.h | #ifndef RESAMPLE_H_
#define RESAMPLE_H_
#include <omp.h>
#include <torch/extension.h>
#include "common/resample.h"
namespace spherical {
namespace cpu {
template <typename T>
void ResampleToMap2D(const int64_t num_kernels, torch::Tensor data_in,
torch::Tensor sample_map, const int64_t channels,
const int64_t in_height, const int64_t in_width,
const int64_t out_height, const int64_t out_width,
const InterpolationType interpolation,
torch::Tensor data_out) {
const T *data_in_ptr = data_in.data_ptr<T>();
const T *sample_map_ptr = sample_map.data_ptr<T>();
T *data_out_ptr = data_out.data_ptr<T>();
int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
data_out_ptr) private(index) schedule(static)
for (index = 0; index < num_kernels; index++) {
common::ResampleToMap2D(index, data_in_ptr, sample_map_ptr, channels,
in_height, in_width, out_height, out_width,
interpolation, data_out_ptr);
}
}
template <typename T>
void ResampleFromMap2D(const int64_t num_kernels, torch::Tensor data_out,
torch::Tensor sample_map, const int64_t channels,
const int64_t in_height, const int64_t in_width,
const int64_t out_height, const int64_t out_width,
const InterpolationType interpolation,
torch::Tensor data_in) {
const T *data_out_ptr = data_out.data_ptr<T>();
const T *sample_map_ptr = sample_map.data_ptr<T>();
T *data_in_ptr = data_in.data_ptr<T>();
int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
data_out_ptr) private(index) schedule(static)
for (index = 0; index < num_kernels; index++) {
common::ResampleFromMap2D(index, data_out_ptr, sample_map_ptr, channels,
in_height, in_width, out_height, out_width,
interpolation, data_in_ptr);
}
}
// --------------------------------------------
// --------------------------------------------
template <typename T>
void ResampleToMap2DWeighted(
const int64_t num_kernels, torch::Tensor data_in, torch::Tensor sample_map,
torch::Tensor interp_weights, const int64_t channels,
const int64_t in_height, const int64_t in_width, const int64_t out_height,
const int64_t out_width, const InterpolationType interpolation,
const int64_t num_interp_pts, torch::Tensor data_out) {
const T *data_in_ptr = data_in.data_ptr<T>();
const T *sample_map_ptr = sample_map.data_ptr<T>();
const T *interp_weights_ptr = interp_weights.data_ptr<T>();
T *data_out_ptr = data_out.data_ptr<T>();
int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
interp_weights_ptr, \
data_out_ptr) private(index) schedule(static)
for (index = 0; index < num_kernels; index++) {
common::ResampleToMap2DWeighted(
index, data_in_ptr, sample_map_ptr, interp_weights_ptr, channels,
in_height, in_width, out_height, out_width, interpolation,
num_interp_pts, data_out_ptr);
}
}
template <typename T>
void ResampleFromMap2DWeighted(
const int64_t num_kernels, torch::Tensor data_out, torch::Tensor sample_map,
torch::Tensor interp_weights, const int64_t channels,
const int64_t in_height, const int64_t in_width, const int64_t out_height,
const int64_t out_width, const InterpolationType interpolation,
const int64_t num_interp_pts, torch::Tensor data_in) {
const T *data_out_ptr = data_out.data_ptr<T>();
const T *sample_map_ptr = sample_map.data_ptr<T>();
const T *interp_weights_ptr = interp_weights.data_ptr<T>();
T *data_in_ptr = data_in.data_ptr<T>();
int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
interp_weights_ptr, \
data_out_ptr) private(index) schedule(static)
for (index = 0; index < num_kernels; index++) {
common::ResampleFromMap2DWeighted(
index, data_out_ptr, sample_map_ptr, interp_weights_ptr, channels,
in_height, in_width, out_height, out_width, interpolation,
num_interp_pts, data_in_ptr);
}
}
// --------------------------------------------
// --------------------------------------------
// Resample label/index maps by voting over numCandidates candidates
// rather than interpolating; one common:: call per kernel index, run in
// parallel as the iterations are independent.
// NOTE(review): the template parameter T is unused here -- the tensors
// are always accessed as int64_t (label indices); confirm callers always
// instantiate this with integer tensors.
template <typename T>
void ResampleToMap2DVoting(const int64_t num_kernels, torch::Tensor data_in,
torch::Tensor sample_map, const int64_t channels,
const int64_t in_height, const int64_t in_width,
const int64_t out_height, const int64_t out_width,
const int64_t numCandidates,
torch::Tensor data_out) {
const int64_t *data_in_ptr = data_in.data_ptr<int64_t>();
const int64_t *sample_map_ptr = sample_map.data_ptr<int64_t>();
int64_t *data_out_ptr = data_out.data_ptr<int64_t>();
int64_t index;
#pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \
data_out_ptr) private(index) schedule(static)
for (index = 0; index < num_kernels; index++) {
common::ResampleToMap2DVoting(index, data_in_ptr, sample_map_ptr, channels,
in_height, in_width, out_height, out_width,
numCandidates, data_out_ptr);
}
}
template <typename T>
void ResampleFromUVMaps(const int64_t num_kernels, torch::Tensor data_out,
torch::Tensor quad_idx, torch::Tensor tex_uv,
const int64_t channels, const int64_t num_textures,
const int64_t tex_height, const int64_t tex_width,
const int64_t in_height, const int64_t in_width,
const InterpolationType interpolation,
torch::Tensor data_in) {
const T *data_out_ptr = data_out.data_ptr<T>();
const int64_t *quad_idx_ptr = quad_idx.data_ptr<int64_t>();
const T *tex_uv_ptr = tex_uv.data_ptr<T>();
T *data_in_ptr = data_in.data_ptr<T>();
int64_t index;
#pragma omp parallel for shared(data_in_ptr, quad_idx_ptr, tex_uv_ptr, \
data_out_ptr) private(index) schedule(static)
for (index = 0; index < num_kernels; index++) {
common::ResampleFromUVMaps(index, data_out_ptr, quad_idx_ptr, tex_uv_ptr,
channels, num_textures, tex_height, tex_width,
in_height, in_width, interpolation, data_in_ptr);
}
}
template <typename T>
void ResampleToUVMaps(const int64_t num_kernels, torch::Tensor data_in,
torch::Tensor quad_idx, torch::Tensor tex_uv,
const int64_t channels, const int64_t num_textures,
const int64_t tex_height, const int64_t tex_width,
const int64_t in_height, const int64_t in_width,
const InterpolationType interpolation,
torch::Tensor data_out) {
const T *data_in_ptr = data_in.data_ptr<T>();
const int64_t *quad_idx_ptr = quad_idx.data_ptr<int64_t>();
const T *tex_uv_ptr = tex_uv.data_ptr<T>();
T *data_out_ptr = data_out.data_ptr<T>();
int64_t index;
#pragma omp parallel for shared(data_out_ptr, quad_idx_ptr, tex_uv_ptr, \
data_in_ptr) private(index) schedule(static)
for (index = 0; index < num_kernels; index++) {
common::ResampleToUVMaps(index, data_in_ptr, quad_idx_ptr, tex_uv_ptr,
channels, num_textures, tex_height, tex_width,
in_height, in_width, interpolation, data_out_ptr);
}
}
} // namespace cpu
} // namespace spherical
#endif |
simd_metadata.c | // RUN: %clang_cc1 -fopenmp=libiomp5 -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp=libiomp5 -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// h1: exercises '#pragma omp simd' with safelen(16), linear(t) and
// aligned clauses.  The comment lines inside are FileCheck patterns that
// verify the emitted LLVM IR (llvm.assume alignment tests, and the
// absence of parallel_loop_access metadata when safelen is used); they
// are part of the test and must stay byte-for-byte intact.
void h1(float *c, float *a, double b[], int size)
{
// CHECK-LABEL: define void @h1
int t = 0;
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// do not emit parallel_loop_access metadata due to usage of safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
}
}
// h2: plain '#pragma omp simd' with only linear(t) -- here the store IS
// expected to carry parallel_loop_access metadata.  The FileCheck
// pattern comment inside is part of the test; keep it byte-for-byte
// intact.
void h2(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define void @h2
int t = 0;
#pragma omp simd linear(t)
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access [[LOOP_H2_HEADER:![0-9]+]]
}
}
// h3: '#pragma omp simd' on the outer loop of a nested loop pair -- the
// inner loop's stores must NOT carry parallel_loop_access metadata.  The
// FileCheck pattern comments are part of the test; keep them intact.
void h3(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define void @h3
#pragma omp simd
for (int i = 0; i < size; ++i) {
for (int j = 0; j < size; ++j) {
c[j*i] = a[i] * b[j];
}
}
// do not emit parallel_loop_access for nested loop.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
}
// Metadata for h1:
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_16:![0-9]+]], [[LOOP_VEC_ENABLE:![0-9]+]]}
// CHECK: [[LOOP_WIDTH_16]] = !{!"llvm.loop.vectorize.width", i32 16}
// CHECK: [[LOOP_VEC_ENABLE]] = !{!"llvm.loop.vectorize.enable", i1 true}
//
// Metadata for h2:
// CHECK: [[LOOP_H2_HEADER]] = distinct !{[[LOOP_H2_HEADER]], [[LOOP_VEC_ENABLE]]}
//
// Metadata for h3:
// CHECK: [[LOOP_H3_HEADER:![0-9]+]] = distinct !{[[LOOP_H3_HEADER]], [[LOOP_VEC_ENABLE]]}
//
|
DRB053-inneronly1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example with loop-carried data dependence at the outer level loop.
But the inner level loop can be parallelized.
*/
#include <stdio.h>
#include <string.h>
#include <omp.h>
/* DataRaceBench DRB053 ("inneronly1-orig-no"): demonstrates a loop nest
 * whose OUTER loop carries a data dependence while the INNER loop is
 * safely parallelizable.  This is a race-free ("no") benchmark case; the
 * code must stay exactly as written for the benchmark to be meaningful. */
int main(int argc,char *argv[])
{
int i;
int j;
double a[20][20];
memset(a,0,sizeof(a));
/* Initialization: every iteration writes a distinct a[i][j], so both
 * loop levels are parallelized (the inner pragma opens a nested
 * parallel region per outer iteration -- legal and race-free). */
#pragma omp parallel for private (i,j)
for (i = 0; i <= 19; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= 19; j += 1) {
a[i][j] = (i * 20 + j);
}
}
/* a[i][j] += a[i+1][j] carries a dependence across i (row i reads the
 * row that iteration i+1 would modify), so only the j-loop is
 * parallelized; the i-loop runs sequentially. */
for (i = 0; i <= 18; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= 19; j += 1) {
a[i][j] += a[i + 1][j];
}
}
/* Sequential dump of the result matrix.
 * NOTE(review): printf needs <stdio.h>; verify it is included above. */
for (i = 0; i <= 19; i += 1) {
for (j = 0; j <= 19; j += 1) {
printf("%lf\n",a[i][j]);
}
}
return 0;
}
|
Determanager.h | /*****************************************************************************************[Cooperation.h]
Copyright (c) 2008-20011, Youssef Hamadi, Saïd Jabbour and Lakhdar Saïs
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************************************/
/* importClauseSwitchMode : (Cooperation* coop)
Description :
In detreministic case, the two barriers guaranty that during import process no other thread can return to search.
Otherwise, each found a solution go out.*/
//=================================================================================================
#include "core/msg.h"
using namespace Minisat;
//XXX called only once by solver immediately after export
// Import clauses and unit literals learnt by the other threads; called by
// each solver right after exporting its own learnts.
//
//   det=0 (non-deterministic): poll the other threads' answers, then pull
//     whatever extra clauses/units are currently available (no sync).
//   det=1 (deterministic, static frequency): every initFreq conflicts all
//     threads meet at a barrier, exchange clauses, and leave through a
//     second barrier so no thread resumes searching while others import.
//   det=2 (deterministic, dynamic frequency): same two-barrier scheme,
//     but each thread re-estimates its own barrier frequency from the
//     relative sizes of the learnt-clause databases.
//
// Returns the answer (l_True/l_False) as soon as any thread has one,
// l_Undef otherwise.
lbool Solver::importClauses(Cooperation* coop) {
  // Adjust the size limit for exported clauses.
  coop->updateLimitExportClauses(this);
  switch(deterministic_mode){
  case 0: // non-deterministic: no synchronization
    {
      for(int t = 0; t < coop->nThreads(); t++)
        if(coop->answer(t) != l_Undef)
          return coop->answer(t);
      coop->importExtraClauses(this);
      coop->importExtraUnits(this, extraUnits);
      pull_clauses_from_remote(this, coop, this->threadId) ;
      break;
    }
  case 1: // deterministic, static frequency
    {
      if((int) conflicts % coop->initFreq == 0 || coop->answer(threadId) != l_Undef){
#pragma omp barrier
        for(int t = 0; t < coop->nThreads(); t++)
          if(coop->answer(t) != l_Undef) return coop->answer(t);
        coop->importExtraClauses(this);
        coop->importExtraUnits(this, extraUnits);
        // Second barrier: nobody resumes the search until every thread
        // has finished importing.
#pragma omp barrier
      }
      break;
    }
  case 2: // deterministic, dynamic frequency
    {
      if(((int) conflicts % coop->deterministic_freq[threadId] == 0) || (coop->answer(threadId) != l_Undef)){
        coop->learntsz[threadId] = nLearnts();
#pragma omp barrier
        // Re-estimate this thread's barrier frequency.  (The previous
        // code called updateFrequency() twice and discarded the first
        // result; one call suffices -- the function only reads shared
        // counters.)
        coop->deterministic_freq[threadId] = updateFrequency(coop);
        for(int t = 0; t < coop->nThreads(); t++)
          if(coop->answer(t) != l_Undef) return coop->answer(t);
        coop->importExtraClauses(this);
        coop->importExtraUnits(this, extraUnits);
#pragma omp barrier
      }
      break;
    }
  }
  return l_Undef;
}
/*_________________________________________________________________________________________________
updateFrequency : (Cooperation* coop)
Description :
when det=2, each thread try to estimate the number of conflicts under which it must to join the barrier.
This estimation based on the calculus of the number of learnts clauses of all learnts and assume that
greater the learnts base slower is the unit propagation, which stay a not bad estimation.
*/
// Estimate the number of conflicts after which this thread should join
// the next barrier (used in deterministic mode det=2).  Threads with
// smaller learnt databases propagate faster, so they are given a
// proportionally higher frequency:
//   freq = initFreq + initFreq * (maxLearnts - learnts.size()) / maxLearnts
// Fix: when no thread has learnt anything yet, maxLearnts is 0 and the
// old code divided by zero; fall back to the initial frequency instead.
int Solver::updateFrequency(Cooperation* coop){
  int maxLearnts = 0;
  for(int t = 0; t < coop->nThreads(); t++)
    if((int)coop->learntsz[t] > maxLearnts)
      maxLearnts = (int)coop->learntsz[t];
  if(maxLearnts == 0)
    return (int) coop->initFreq;
  double freq = coop->initFreq + (double)coop->initFreq * (maxLearnts - learnts.size()) / maxLearnts;
  return (int) freq;
}
/*_________________________________________________________________________________________________
uncheckedEnqueueImportedUnits : (Cooperation* coop)
Description :
At level 0, units literals propaged are exported to others threads
*/
// Export newly learnt clauses/units to the other threads (and to remote
// solvers).  At decision level 0 every literal appended to the trail
// since the last export is a unit clause and is flushed out; at deeper
// levels the single learnt clause is exported instead.  A clause size
// limit below 1 disables exporting entirely.
void Solver::exportClause(Cooperation* coop, vec<Lit>& learnt_clause) {
  if(coop->limitszClauses() < 1)
    return;
  if(decisionLevel() != 0){
    coop->exportExtraClause(this, learnt_clause) ;
    push_clause_remote(learnt_clause) ;
    return;
  }
  // Level 0: flush all unit literals propagated since the last export.
  for(int k = tailUnitLit; k < trail.size(); k++) {
    coop->exportExtraUnit(this, trail[k]) ;
    push_unit_remote(trail[k]) ;
  }
  tailUnitLit = trail.size();
}
//=================================================================================================
// add Clauses received from others threads
// Register a clause received from another thread: allocate it as a
// learnt clause, attach it for propagation, and bump its activity so it
// is not immediately discarded by clause-database reduction.
CRef Solver::addExtraClause(vec<Lit>& lits){
  const CRef ref = ca.alloc(lits, true);
  learnts.push(ref);
  attachClause(ref);
  claBumpActivity(ca[ref]);
  return ref;
}
//=================================================================================================
// at level 0, unit extra clauses stored are propagated
void Solver::propagateExtraUnits(){
for(int i = 0; i < extraUnits.size(); i++)
if(value(extraUnits[i]) == l_Undef)
uncheckedEnqueue(extraUnits[i]);
}
|
ntlmv1_mschapv2_fmt_plug.c | /*
* Previous files MSCHAPv2_fmt_plug.c and NETNTLM_fmt_plug.c now merged into
* this one file, sharing functions.
*
* NETNTLM_fmt.c -- NTLM Challenge/Response
* Written by JoMo-Kun <jmk at foofus.net> in 2007
* and placed in the public domain.
*
* This algorithm is designed for performing brute-force cracking of the NTLM
* (version 1) challenge/response pairs exchanged during network-based
* authentication attempts [1]. The captured challenge/response pairs from these
* attempts should be stored using the L0phtCrack 2.0 LC format, specifically:
* username:unused:unused:lm response:ntlm response:challenge. For example:
*
* CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1:
* C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788
*
* It should be noted that a NTLM authentication response is not same as a NTLM
* password hash, which can be extracted using tools such as FgDump [2]. NTLM
* responses can be gathered via normal network capture or via tools which
* perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
* also be harvested using a modified Samba service [5] in conjunction with
* some trickery to convince the user to connect to it. I leave what that
* trickery may actually be as an exercise for the reader (HINT: Karma, NMB
* broadcasts, IE, Outlook, social engineering, ...).
*
* [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse
* [2] http://www.foofus.net/~fizzgig/fgdump/
* [3] http://ettercap.sourceforge.net/
* [4] http://www.oxid.it/cain.html
* [5] http://www.foofus.net/jmk/smbchallenge.html
*
* This version supports Extended Session Security. This is what
* is used when the "LM" hash ends in 32 zeros:
*
* DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000:
* abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4
*
* MSCHAPv2_fmt.c -- Microsoft PPP CHAP Extensions, Version 2
* Written by JoMo-Kun <jmk at foofus.net> in 2010
* and placed in the public domain.
*
* Support for freeradius-wep-patch challenge/response format
* added by Linus Lüssing in 2012 and is licensed under CC0/PD terms:
* To the extent possible under law, Linus Lüssing has waived all copyright
* and related or neighboring rights to this work. This work is published from:
* Germany.
*
*
* This algorithm is designed for performing brute-force cracking of the
* MSCHAPv2 challenge/response sets exchanged during network-based
* authentication attempts. The captured challenge/response set from these
* attempts should be stored using the following format:
*
* USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* USERNAME::DOMAIN:AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* DOMAIN\USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* :::MSCHAPv2 CHALLENGE:MSCHAPv2 RESPONSE:
*
* For example:
* User:::5B5D7C7D7B3F2F3E3C2C602132262628:82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF:21402324255E262A28295F2B3A337C7E
* domain\fred:::56d64cbe7bad61349a0b752335100eaf:d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b:7f8a466cff2a6bf0c80218bbf56d76bc
*
* http://freeradius.org/rfc/rfc2759.txt
*
* Modified for performance and support for SSE2, NTLMv1 ESS, OMP and UTF-8, by
* magnum 2010-2011 and 2013.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_MSCHAPv2_new;
extern struct fmt_main fmt_NETNTLM_new;
#elif FMT_REGISTERS_H
john_register_one(&fmt_MSCHAPv2_new);
john_register_one(&fmt_NETNTLM_new);
#else
#include <string.h>
#include <openssl/des.h>
#include "arch.h"
#include "simd-intrinsics.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD4)
#else
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#include <omp.h>
#endif
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "memory.h"
#include "johnswap.h"
#include "sha.h"
#include "md4.h"
#include "md5.h"
#include "unicode.h"
#include "john.h"
#include "memdbg.h"
extern volatile int bench_running;
#ifndef uchar
#define uchar unsigned char
#endif
#define CHAP_FORMAT_LABEL "MSCHAPv2"
#define CHAP_FORMAT_NAME "C/R"
#define FORMAT_TAG "$MSCHAPv2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAGN "$NETNTLM$"
#define FORMAT_TAGN_LEN (sizeof(FORMAT_TAGN)-1)
#define CHAP_USERNAME_LENGTH 256
#define CHAP_CHALLENGE_LENGTH 64
#define CHAP_TOTAL_LENGTH 13 + CHAP_USERNAME_LENGTH + CHAP_CHALLENGE_LENGTH + CIPHERTEXT_LENGTH
#define NTLM_FORMAT_LABEL "netntlm"
#define NTLM_FORMAT_NAME "NTLMv1 C/R"
#define NTLM_TOTAL_LENGTH (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH)
#define ALGORITHM_NAME "MD4 DES (ESS MD5) " MD4_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define FULL_BINARY_SIZE (2 + 8 * 3)
#define BINARY_SIZE (2 + 8)
#define BINARY_ALIGN 2
#define SALT_SIZE 8
#define SALT_ALIGN MEM_ALIGN_WORD
#define CIPHERTEXT_LENGTH 48
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 27
//#define SSE_OMP
#if defined (_OPENMP) && defined(SSE_OMP)
#define BLOCK_LOOPS (2048 / NBKEYS)
#else
#define BLOCK_LOOPS (1024 / NBKEYS)
#endif
#define MIN_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS)
#define MAX_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS)
// These 2 get the proper uint32_t limb from the SIMD mixed set. They both
// work properly for both BE and LE machines :) These SHOULD be used whenever
// the full uint32_t item is wanted, usually RHS of an assignment to uint32_t*
// NOTE, i is number is based on uint32_t[] and not uint8_t[] offsets.
#define GETOUTPOS_W32(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i<<2)&(0xffffffff-3))*SIMD_COEF_32 + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32*4 )
#define GETPOS_W32(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i<<2)&(0xffffffff-3))*SIMD_COEF_32 + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
// GETPOS HAS to be BE/LE specific
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#endif
#else
#define PLAINTEXT_LENGTH 64
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 2048
#endif
#ifdef SIMD_COEF_32
static unsigned char *saved_key;
#else
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
#endif
static unsigned short (*crypt_key);
static unsigned char *nthash;
static uint32_t *bitmap;
static int cmps_per_crypt, use_bitmap;
static int valid_i, valid_j;
static uchar *challenge;
static int keys_prepared;
static struct fmt_main *my;
static char *chap_long_to_short(char *orig); /* used to cannonicalize the MSCHAPv2 format */
/* Self-test vectors for the MSCHAPv2 format.  Three input shapes are
 * exercised: the long form ($MSCHAPv2$authenticator$response$peer$user),
 * the short/Ettercap form ($MSCHAPv2$challenge$response$$user), and the
 * split-field form (fields array) consumed by prepare().  The hex
 * strings are captured challenge/response data -- do not edit them. */
static struct fmt_tests chap_tests[] = {
{"$MSCHAPv2$4c092fd3fd98236502e8591100046326$b912ce522524d33123a982cf330a57f8e953fa7974042b5d$6a4915d0ce61d42be533640a75391925$1111", "2222"},
{"$MSCHAPv2$5B5D7C7D7B3F2F3E3C2C602132262628$82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF$21402324255E262A28295F2B3A337C7E$User", "clientPass"},
{"$MSCHAPv2$d07054459a1fdbc266a006f0220e6fac$33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde$3545cb1d89b507a5de104435e81b14a4$testuser1", "Cricket8"},
{"$MSCHAPv2$56d64cbe7bad61349a0b752335100eaf$d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b$7f8a466cff2a6bf0c80218bbf56d76bc$fred", "OMG!BBQ!11!one"}, /* domain\fred */
#if PLAINTEXT_LENGTH >= 35
{"$MSCHAPv2$b3c42db475b881d3c52ff3923d7b3bf8$f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8$6321f8649b971bd11ce8d5cb22a4a738$bOb", "asdblahblahblahblahblahblahblahblah"}, /* WorkGroup\bOb */
#endif
{"$MSCHAPv2$d94e7c7972b2376b28c268583e162de7$eba25a3b04d2c7085d01f842e2befc91745c40db0f792356$0677ca7318fd7f65ae1b4f58c9f4f400$lameuser", ""}, /* no password */
{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$foo4", "bar4" },
{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$", "bar4" },
/* Ettercap generated three test vectors */
{"$MSCHAPv2$3D79CC8CDC0261D4$B700770725F87739ADB110B310D9A289CDBB550ADCA6CB86$solar", "solarisalwaysbusy"},
{"$MSCHAPv2$BA75EB14EFBFBF25$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$lulu", "password"},
{"$MSCHAPv2$95A87FA62EBCD2E3C8B09E1B448A6C72$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$E2AE0995EAAC6CEFF0D9757428B51509$lulu", "password"},
/* Single test vector from chapcrack's sample pcap file */
{"$MSCHAPv2$6D0E1C056CD94D5F$1C93ABCE815400686BAECA315F348469256420598A73AD49$moxie", "bPCFyF2uL1p5Lg5yrKmqmY"},
{"", "clientPass", {"User", "", "", "5B5D7C7D7B3F2F3E3C2C602132262628", "82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF", "21402324255E262A28295F2B3A337C7E"} },
{"", "Cricket8", {"testuser1", "", "", "d07054459a1fdbc266a006f0220e6fac", "33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde", "3545cb1d89b507a5de104435e81b14a4"} },
{"", "OMG!BBQ!11!one", {"domain\\fred", "", "", "56d64cbe7bad61349a0b752335100eaf", "d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b", "7f8a466cff2a6bf0c80218bbf56d76bc"} }, /* domain\fred */
{"", "", {"lameuser", "", "domain", "d94e7c7972b2376b28c268583e162de7", "eba25a3b04d2c7085d01f842e2befc91745c40db0f792356", "0677ca7318fd7f65ae1b4f58c9f4f400"} }, /* no password */
{NULL}
};
/*
 * Self-test vectors for the NETNTLM format: { ciphertext, plaintext } or
 * split-field entries (consumed by ntlm_prepare()); the ESS entry carries
 * the client challenge in the "LM" field.
 */
static struct fmt_tests ntlm_tests[] = {
{"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"},
#ifndef SIMD_COEF_32 /* exceeds max length for SSE */
{"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"},
#endif
{"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"},
{"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "FooBarGerg"},
{"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"},
{"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"},
{"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"},
{"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} },
{"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} },
{"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} },
{"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} },
{"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} },
{NULL}
};
/*
 * Expand a 56-bit (7-byte) key into the 64-bit (8-byte) form DES_set_key()
 * expects and build the key schedule.  Byte i of the output carries the
 * i-th run of 7 key bits shifted into its high bits; the low (parity) bit
 * of each byte is not explicitly set.
 */
inline static void setup_des_key(uchar key_56[], DES_key_schedule *ks)
{
	DES_cblock key;
	int i;

	key[0] = key_56[0];
	for (i = 1; i < 7; i++)
		key[i] = (key_56[i - 1] << (8 - i)) | (key_56[i] >> i);
	key[7] = key_56[6] << 1;

	DES_set_key(&key, ks);
}
/*
 * Validate the "long" MSCHAPv2 ciphertext form:
 *   $MSCHAPv2$<auth challenge>$<response>$<peer challenge>$<username>
 * Each hex field is checked for digits and exact length; the username may
 * be at most CHAP_USERNAME_LENGTH bytes.  Returns 1 on success, 0 otherwise.
 */
static int chap_valid_long(char *ciphertext)
{
char *pos, *pos2;
if (ciphertext == NULL) return 0;
else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
if (strlen(ciphertext) > CHAP_TOTAL_LENGTH)
return 0;
/* Validate Authenticator/Server Challenge Length */
pos = &ciphertext[FORMAT_TAG_LEN];
for (pos2 = pos; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
/* Field must end at a '$' and be exactly CHAP_CHALLENGE_LENGTH/2 hex chars */
if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) )
return 0;
/* Validate MSCHAPv2 Response Length */
pos2++; pos = pos2;
for (; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
return 0;
/* Validate Peer/Client Challenge Length */
pos2++; pos = pos2;
for (; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) )
return 0;
/* Validate Username Length */
if (strlen(++pos2) > CHAP_USERNAME_LENGTH)
return 0;
return 1;
}
/*
 * Validate the short (canonical) MSCHAPv2 form:
 *   $MSCHAPv2$<challenge (CHAP_CHALLENGE_LENGTH/4 hex)>$<response>$...
 * Only the first two fields are length- and digit-checked; anything after
 * the response's trailing '$' is ignored here.
 */
static int chap_valid_short(char *ciphertext)
{
char *pos, *pos2;
if (ciphertext == NULL) return 0;
else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
if (strlen(ciphertext) > CHAP_TOTAL_LENGTH)
return 0;
/* Validate MSCHAPv2 Challenge Length */
pos = &ciphertext[FORMAT_TAG_LEN];
for (pos2 = pos; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 4)) )
return 0;
/* Validate MSCHAPv2 Response Length */
pos2++; pos = pos2;
for (; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
return 0;
return 1;
}
/*
 * Decode the SALT_SIZE-byte challenge from the hex field that directly
 * follows the "$MSCHAPv2$" tag into binary_salt.
 */
static void chap_get_challenge(const char *ciphertext,
                               unsigned char *binary_salt)
{
	const char *p = ciphertext + FORMAT_TAG_LEN;
	int out;

	for (out = 0; out < SALT_SIZE; out++) {
		int hi = atoi16[ARCH_INDEX(p[0])];
		int lo = atoi16[ARCH_INDEX(p[1])];

		binary_salt[out] = (unsigned char)((hi << 4) + lo);
		p += 2;
	}
}
/* Either the ciphertext already contains the MSCHAPv2 Challenge (4 Bytes) or
we are going to calculate it via:
sha1(|Peer/Client Challenge (8 Bytes)|Authenticator/Server Challenge (8 Bytes)|Username (<=256)|)
NOTE, we now ONLY call this function for the short form. The long form gets converted into the short
form in either prepare or split function. The short form is the canonical form (Change made July, 2014, JimF)
*/
/*
 * salt() for MSCHAPv2: decode the 8-byte challenge from the canonical
 * short-form ciphertext.  Returns a pointer to a static SALT_SIZE buffer.
 * (The unused 'digest[20]' scratch buffer that used to live here has been
 * removed: it was zeroed but never read.)
 */
static void *chap_get_salt(char *ciphertext)
{
	static unsigned char *binary_salt;

	if (!binary_salt)
		binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
	/* This is just to silence scan-build. It will never happen.
	   It is unclear why only this format gave warnings, many others do
	   similar things. */
	if (!ciphertext)
		return ciphertext;
	memset(binary_salt, 0, SALT_SIZE);
	chap_get_challenge(ciphertext, binary_salt);
	return (void*)binary_salt;
}
/*
 * This function will convert long hashes into short ones (the short form is now the canonical format)
 * converts
 * $MSCHAPv2$95a87fa62ebcd2e3c8b09e1b448a6c72$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$e2ae0995eaac6ceff0d9757428b51509$lulu
 * into
 * $MSCHAPv2$ba75eb14efbfbf25$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$$
 *
 * This code was moved from get_salt().
 */
/*
 * Convert a long-form MSCHAPv2 hash into the short canonical form.  The
 * 8-byte short challenge is the first SALT_SIZE bytes of
 * SHA1(peer challenge || authenticator challenge || username).
 *
 * Changes vs. the previous revision: the SHA_CTX is a plain local (it is
 * fully re-initialized on every call, so 'static' only added hidden shared
 * state), and the magic offset 42 is spelled out as tag + challenge hex.
 */
static char *chap_long_to_short(char *ciphertext) {
	static char Buf[CHAP_TOTAL_LENGTH+1]; // larger than we need, but not a big deal
	SHA_CTX ctx;
	unsigned char tmp[16];
	unsigned char digest[20];
	char *pos = NULL;
	int i;

	SHA1_Init(&ctx);
	/* Peer Challenge */
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1; /* Skip $MSCHAPv2$, Authenticator Challenge and Response Hash */
	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);
	/* Authenticator Challenge */
	pos = ciphertext + FORMAT_TAG_LEN; /* Skip $MSCHAPv2$ */
	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);
	/* Username - Only the user name (as presented by the peer and
	   excluding any prepended domain name) is used as input to SHA1_Update()
	*/
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1 + 16*2 + 1; /* Skip $MSCHAPv2$, Authenticator, Response and Peer */
	SHA1_Update(&ctx, pos, strlen(pos));
	SHA1_Final(digest, &ctx);

	// Now rebuild the ciphertext buffer in the short canonical form.
	strcpy(Buf, FORMAT_TAG);
	pos = Buf + FORMAT_TAG_LEN;
	for (i = 0; i < SALT_SIZE; i++) {
		pos[(i<<1)] = itoa16[digest[i]>>4];
		pos[(i<<1)+1] = itoa16[digest[i]&0xF];
	}
	/* Copy "$<response>$" from the original: the response's leading '$'
	   sits right after the tag plus the 32 hex chars of the
	   authenticator challenge. */
	memcpy(&pos[16], &ciphertext[FORMAT_TAG_LEN + 16*2], CIPHERTEXT_LENGTH+2);
	pos[16+CIPHERTEXT_LENGTH+2] = '$';
	pos[16+CIPHERTEXT_LENGTH+3] = 0;
	return Buf;
}
/*
 * valid() for MSCHAPv2.  Accepts either form, then sanity-checks the third
 * DES block of the response: the 16-byte NT hash is zero-padded to 21
 * bytes (see cmp_exact), so the third 7-byte DES key is
 * hash[14], hash[15], 0, 0, 0, 0, 0 -- only 64K possibilities.  A hash
 * whose third block matches none of them cannot be genuine and is
 * rejected.  valid_i/valid_j cache the last matching byte pair so repeated
 * loads of related hashes short-cut the 64K scan.
 */
static int chap_valid(char *ciphertext, struct fmt_main *pFmt)
{
char *cp = NULL;
if (chap_valid_short(ciphertext))
cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1;
else if (chap_valid_long(ciphertext))
cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1;
if (cp) {
uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
DES_key_schedule ks;
DES_cblock b3cmp;
uchar binary[8];
DES_cblock *challenge = chap_get_salt(ciphertext);
int i, j;
/* skip the first two 8-byte response blocks (32 hex chars) */
cp += 2 * 8 * 2;
for (i = 0; i < 8; i++) {
binary[i] = atoi16[ARCH_INDEX(cp[i * 2])] << 4;
binary[i] |= atoi16[ARCH_INDEX(cp[i * 2 + 1])];
}
/* try the cached pair first, then brute-force all 64K pairs */
key[0] = valid_i; key[1] = valid_j;
setup_des_key(key, &ks);
DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
if (!memcmp(binary, &b3cmp, 8))
return 1;
for (i = 0; i < 0x100; i++)
for (j = 0; j < 0x100; j++) {
key[0] = i; key[1] = j;
setup_des_key(key, &ks);
DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
if (!memcmp(binary, &b3cmp, 8)) {
valid_i = i;
valid_j = j;
return 1;
}
}
#ifdef DEBUG
if (!bench_running)
fprintf(stderr, "Rejected MSCHAPv2 hash with "
"invalid 3rd block\n");
#endif
}
return 0;
}
/*
 * Build a long-form MSCHAPv2 ciphertext from split fields
 * (challenge = field 3, response = field 4, peer challenge = field 5,
 * username = field 0, with any "DOMAIN\" prefix stripped).  Returns a
 * str_alloc_copy'd string on success, else the original field 1.
 */
static char *chap_prepare_long(char *split_fields[10])
{
	char *username, *cp;

	/* DOMAIN\USERNAME -or- USERNAME -- ignore DOMAIN */
	/* (strchr: we search for a single character, not a substring) */
	if ((username = strchr(split_fields[0], '\\')) == NULL)
		username = split_fields[0];
	else
		username++;

	cp = mem_alloc(FORMAT_TAG_LEN+strlen(split_fields[3])+1+strlen(split_fields[4])+
	               1+strlen(split_fields[5])+1+strlen(username)+1);
	sprintf(cp, "%s%s$%s$%s$%s", FORMAT_TAG, split_fields[3], split_fields[4],
	        split_fields[5], username);
	if (chap_valid_long(cp)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}
/*
 * Build a short-form MSCHAPv2 ciphertext "$MSCHAPv2$<chal>$<resp>$$" from
 * split fields 3 (challenge) and 4 (response).  Returns a str_alloc_copy'd
 * string when it validates, otherwise the original field 1.
 */
static char *chap_prepare_short(char *split_fields[10])
{
	size_t need = FORMAT_TAG_LEN + strlen(split_fields[3]) + 1 +
	              strlen(split_fields[4]) + 1 + 1 + 1;
	char *candidate = mem_alloc(need);
	char *result = split_fields[1];

	sprintf(candidate, "%s%s$%s$$", FORMAT_TAG, split_fields[3],
	        split_fields[4]);
	if (chap_valid_short(candidate))
		result = str_alloc_copy(candidate);
	MEM_FREE(candidate);
	return result;
}
/*
 * prepare() for MSCHAPv2.  Accepts an already-tagged ciphertext (trimming
 * trailing trash fields from a short form), or assembles one from split
 * fields -- long or short depending on which fields are present and their
 * lengths.  Any long form that validates is converted to the canonical
 * short form before being returned.
 */
static char *chap_prepare(char *split_fields[10], struct fmt_main *pFmt)
{
char *ret;
if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) {
// check for a short format that has any extra trash fields, and if so remove them.
char *cp1, *cp2, *cp3;
cp1 = split_fields[1];
cp1 += FORMAT_TAG_LEN;
cp2 = strchr(cp1, '$');
ret = NULL;
if (cp2 && cp2-cp1 == CHAP_CHALLENGE_LENGTH/4) {
++cp2;
cp3 = strchr(cp2, '$');
/* NOTE(review): the '||' below looks suspicious -- when strlen(cp3) <= 2
   the cp3[2] access can touch memory past the terminator, and '&&' may
   have been intended.  Confirm against upstream before changing. */
if (cp3 && cp3-cp2 == CIPHERTEXT_LENGTH && (strlen(cp3) > 2 || cp3[2] != '$')) {
ret = str_alloc_copy(split_fields[1]);
ret[(cp3-split_fields[1]) + 1] = '$';
ret[(cp3-split_fields[1]) + 2] = 0;
//printf("Here is the cut item: %s\n", ret);
}
}
}
else if (split_fields[0] && split_fields[3] && split_fields[4] &&
split_fields[5] &&
strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/2 &&
strlen(split_fields[4]) == CIPHERTEXT_LENGTH &&
strlen(split_fields[5]) == CHAP_CHALLENGE_LENGTH/2)
ret = chap_prepare_long(split_fields);
else if (split_fields[0] && split_fields[3] && split_fields[4] &&
strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/4 &&
strlen(split_fields[4]) == CIPHERTEXT_LENGTH)
ret = chap_prepare_short(split_fields);
else
ret = NULL;
/* canonicalize: long form (from either source) becomes short form */
if (ret && chap_valid_long(ret))
ret = chap_long_to_short(ret);
else if (chap_valid_long(split_fields[1]))
ret = chap_long_to_short(split_fields[1]);
return ret ? ret : split_fields[1];
}
/*
 * split() for MSCHAPv2: lower-case the three hex fields (leaving the tag
 * and any username untouched) and canonicalize a long form to short.
 * The input copy is now length-bounded: valid() guarantees the string
 * fits, but an over-long string must not be able to smash 'out'.
 */
static char *chap_split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CHAP_TOTAL_LENGTH + 1];
	size_t len;
	int i, j = 0;

	memset(out, 0, sizeof(out));
	len = strlen(ciphertext);
	if (len > CHAP_TOTAL_LENGTH)
		len = CHAP_TOTAL_LENGTH;
	memcpy(out, ciphertext, len);

	/* convert hashes to lower-case - exclude $MSCHAPv2 and USERNAME
	   (stop after the third '$', which ends the last hex field) */
	for (i = FORMAT_TAG_LEN; i < CHAP_TOTAL_LENGTH + 1 && j < 3; i++) {
		if (out[i] >= 'A' && out[i] <= 'Z')
			out[i] |= 0x20;
		else if (out[i] == '$')
			j++;
	}

	if (chap_valid_long(out))
		return chap_long_to_short(out);
	return out;
}
/*
 * salt() for NETNTLM.  Two on-disk variants:
 *  - plain: "$NETNTLM$" + 16 hex chars of server challenge (so
 *    ciphertext[25] is the separating '$') -> decode the 8 bytes directly;
 *  - ESS: 32 hex chars holding server+client challenge material -> the
 *    effective challenge is the first SALT_SIZE bytes of MD5 over those
 *    16 decoded bytes.
 * Returns a pointer to a static SALT_SIZE-byte buffer.
 */
static void *ntlm_get_salt(char *ciphertext)
{
static uchar *binary_salt;
int i;
if (!binary_salt)
binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
if (ciphertext[25] == '$') {
// Server challenge
ciphertext += FORMAT_TAGN_LEN;
for (i = 0; i < SALT_SIZE; ++i)
binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
atoi16[ARCH_INDEX(ciphertext[i*2+1])];
} else {
uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE];
MD5_CTX ctx;
ciphertext += FORMAT_TAGN_LEN;
// Extended Session Security,
// Concatenate Server & Client challenges
for (i = 0;i < 2 * SALT_SIZE; ++i)
es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
atoi16[ARCH_INDEX(ciphertext[i*2+1])];
// MD5 the concatenated challenges, result is our key
MD5_Init(&ctx);
MD5_Update(&ctx, es_salt, 16);
MD5_Final((void*)k1, &ctx);
memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it
}
return (void*)binary_salt;
}
/*
 * valid() for NETNTLM: requires the "$NETNTLM$" tag, a total length of 74
 * (16-hex challenge) or 90 (32-hex ESS challenge), and two hex fields
 * separated by '$'.  As in chap_valid(), the third DES block of the
 * response is then brute-forced against the 64K possible
 * (hash[14], hash[15]) pairs -- the NT hash is zero-padded to 21 bytes, so
 * only those two bytes of the last 7-byte DES key are unknown -- and the
 * hash is rejected if no pair reproduces it.
 */
static int ntlm_valid(char *ciphertext, struct fmt_main *self)
{
char *pos;
if (strncmp(ciphertext, FORMAT_TAGN, FORMAT_TAGN_LEN)!=0) return 0;
if ((strlen(ciphertext) != 74) && (strlen(ciphertext) != 90)) return 0;
if ((ciphertext[25] != '$') && (ciphertext[41] != '$')) return 0;
for (pos = &ciphertext[FORMAT_TAGN_LEN]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
if (*pos != '$') return 0;
for (pos++; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
if (!*pos && ((pos - ciphertext - 26 == CIPHERTEXT_LENGTH) ||
(pos - ciphertext - 42 == CIPHERTEXT_LENGTH))) {
uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
DES_key_schedule ks;
DES_cblock b3cmp;
uchar binary[8];
DES_cblock *challenge = ntlm_get_salt(ciphertext);
int i, j;
/* point at the 3rd 8-byte block: past the last '$' and 32 hex chars */
ciphertext = strrchr(ciphertext, '$') + 1 + 2 * 8 * 2;
for (i = 0; i < 8; i++) {
binary[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4;
binary[i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
}
/* try the cached pair first, then all 64K pairs */
key[0] = valid_i; key[1] = valid_j;
setup_des_key(key, &ks);
DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
if (!memcmp(binary, &b3cmp, 8))
return 1;
for (i = 0; i < 0x100; i++)
for (j = 0; j < 0x100; j++) {
key[0] = i; key[1] = j;
setup_des_key(key, &ks);
DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
if (!memcmp(binary, &b3cmp, 8)) {
valid_i = i;
valid_j = j;
return 1;
}
}
#ifdef DEBUG
if (!bench_running)
fprintf(stderr, "Rejected NetNTLM hash with invalid "
"3rd block\n");
#endif
}
return 0;
}
/*
 * prepare() for NETNTLM: assemble
 * "$NETNTLM$<server challenge>[<client challenge>]$<response>" from
 * pwdump-style split fields.  Rejects responses of the wrong length,
 * NTLMv2 blobs (the "0101000000000000" marker inside the response), and
 * the well-known anonymous-login challenge/response pair.  For ESS, the
 * 8-byte client challenge rides in the otherwise-zero "LM" field and is
 * appended to the server challenge.
 */
static char *ntlm_prepare(char *split_fields[10], struct fmt_main *self)
{
char *cp;
char clientChal[17];
if (!strncmp(split_fields[1], FORMAT_TAGN, FORMAT_TAGN_LEN))
return split_fields[1];
if (!split_fields[3]||!split_fields[4]||!split_fields[5])
return split_fields[1];
if (strlen(split_fields[4]) != CIPHERTEXT_LENGTH)
return split_fields[1];
// this string suggests we have an improperly formatted NTLMv2
if (!strncmp(&split_fields[4][32], "0101000000000000", 16))
return split_fields[1];
// Ignore anonymous login (Username "", Password "")
if (split_fields[0] && strlen(split_fields[0]) == 0 &&
!strncasecmp(split_fields[3], "edb7398877d716be", 16) &&
!strncasecmp(split_fields[4], "42aeb71fbb6dc18499016b08"
"b178ba65430ad39ae2498629", 48))
return split_fields[1];
// Handle ESS (8 byte client challenge in "LM" field padded with zeros)
if (strlen(split_fields[3]) == 48 &&
!strncmp(&split_fields[3][16], "00000000000000000000000000000000",
32))
{
memcpy(clientChal, split_fields[3],16);
clientChal[16] = 0;
}
else
clientChal[0] = 0;
cp = mem_alloc(FORMAT_TAGN_LEN+strlen(split_fields[5])+strlen(clientChal)+1+
strlen(split_fields[4])+1);
sprintf(cp, "%s%s%s$%s", FORMAT_TAGN, split_fields[5], clientChal,
split_fields[4]);
if (ntlm_valid(cp,self)) {
char *cp2 = str_alloc_copy(cp);
MEM_FREE(cp);
return cp2;
}
MEM_FREE(cp);
return split_fields[1];
}
/*
 * split() for NETNTLM: lower-case everything after the "$NETNTLM$" tag.
 * The copy is now length-bounded: valid() limits input to 74 or 90 chars,
 * but an over-long string must not be able to overflow 'out' (the old
 * code used an unchecked strcpy).
 */
static char *ntlm_split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[NTLM_TOTAL_LENGTH + 1];
	size_t len;

	memset(out, 0, sizeof(out));
	len = strlen(ciphertext);
	if (len > NTLM_TOTAL_LENGTH)
		len = NTLM_TOTAL_LENGTH;
	memcpy(out, ciphertext, len); /* out[len] stays 0 from the memset */
	strlwr(&out[FORMAT_TAGN_LEN]); /* Exclude: $NETNTLM$ */
	return out;
}
/* Remember the active 8-byte challenge for crypt/cmp; 'salt' points at the
   static buffer returned by chap_get_salt()/ntlm_get_salt(). */
static void set_salt(void *salt)
{
challenge = salt;
}
// ISO-8859-1 to UCS-2, directly into vector key buffer
/*
 * set_key for ASCII/ISO-8859-1 input: widen each byte to a 16-bit unit and
 * write it into the interleaved SIMD MD4 input buffer (two units per
 * 32-bit word), appending the 0x80 MD4 padding byte and storing the bit
 * length in input word 14.  Scalar build: plain byte-to-UTF16 copy plus a
 * byte-length record.  Marks keys as needing a new crypt_all() pass.
 */
static void set_key_ansi(char *_key, int index)
{
#ifdef SIMD_COEF_32
const uchar *key = (uchar*)_key;
unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS_W32(0, index)];
unsigned int len, temp2;
len = 0;
/* pack two chars per word; on odd length the 0x80 pad shares the word */
while((temp2 = *key++)) {
unsigned int temp;
if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
{
temp2 |= (temp << 16);
*keybuf_word = temp2;
}
else
{
temp2 |= (0x80 << 16);
*keybuf_word = temp2;
len++;
goto key_cleaning;
}
len += 2;
keybuf_word += SIMD_COEF_32;
}
*keybuf_word = 0x80;
key_cleaning:
keybuf_word += SIMD_COEF_32;
/* wipe leftovers from a previous, longer key */
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
/* bit length (len chars * 16 bits) goes into MD4 input word 14 */
((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
(unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
#if ARCH_LITTLE_ENDIAN
UTF8 *s = (UTF8*)_key;
UTF16 *d = saved_key[index];
while (*s)
*d++ = *s++;
*d = 0;
saved_len[index] = (int)((char*)d - (char*)saved_key[index]);
#else
/* big-endian scalar: write each char into the first byte of its 16-bit
   slot and skip the second, producing UTF-16LE byte order in memory
   (MD4_Update in crypt_all() consumes raw bytes). */
UTF8 *s = (UTF8*)_key;
UTF8 *d = (UTF8*)saved_key[index];
while (*s) {
*d++ = *s++;
++d;
}
*d = 0;
saved_len[index] = (int)((char*)d - (char*)saved_key[index]);
#endif
#endif
keys_prepared = 0;
}
// Legacy codepage to UCS-2, directly into vector key buffer
/*
 * Same packing as set_key_ansi(), but each input byte is first mapped
 * through the CP_to_Unicode[] table for the configured legacy codepage.
 * Scalar build: converts with enc_to_utf16() and records the byte length
 * (falling back to strlen16 when the conversion reports truncation).
 */
static void set_key_CP(char *_key, int index)
{
#ifdef SIMD_COEF_32
const uchar *key = (uchar*)_key;
unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS_W32(0, index)];
unsigned int len, temp2;
len = 0;
while((temp2 = *key++)) {
unsigned int temp;
temp2 = CP_to_Unicode[temp2];
if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
{
temp = CP_to_Unicode[temp];
temp2 |= (temp << 16);
*keybuf_word = temp2;
} else {
temp2 |= (0x80 << 16);
*keybuf_word = temp2;
len++;
goto key_cleaning_enc;
}
len += 2;
keybuf_word += SIMD_COEF_32;
}
*keybuf_word = 0x80;
key_cleaning_enc:
keybuf_word += SIMD_COEF_32;
/* wipe leftovers from a previous, longer key */
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
/* bit length into MD4 input word 14 */
((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
(unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
saved_len[index] = enc_to_utf16(saved_key[index],
PLAINTEXT_LENGTH + 1,
(uchar*)_key,
strlen(_key)) << 1;
if (saved_len[index] < 0)
saved_len[index] = strlen16(saved_key[index]);
#endif
keys_prepared = 0;
}
// UTF-8 to UCS-2, directly into vector key buffer
/*
 * set_key for UTF-8 input: decode two UTF-16 units at a time (chl = first
 * unit of the pair, chh = second) and pack each pair into one 32-bit word
 * of the interleaved SIMD MD4 buffer.  The switch statements fall through
 * on purpose: each case consumes one UTF-8 continuation byte.
 *
 * Fixes vs. the previous revision: in the second-unit decoder, 'case 3:'
 * accumulated into chl instead of chh (copy-paste error; it corrupted the
 * first unit and left the second undecoded for 4-byte sequences when
 * NT_FULL_UNICODE is enabled), and a stray double semicolon was removed.
 */
static void set_key_utf8(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const UTF8 *source = (UTF8*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS_W32(0, index)];
	UTF32 chl, chh = 0x80;
	unsigned int len = 0;

	while (*source) {
		chl = *source;
		if (chl >= 0xC0) {
			unsigned int extraBytesToRead;
			extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
			switch (extraBytesToRead) {
#if NT_FULL_UNICODE
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
#endif
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
			case 0:
				break;
			default:
				goto bailout;
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
#if NT_FULL_UNICODE
		if (chl > UNI_MAX_BMP) {
			if (len == PLAINTEXT_LENGTH) {
				chh = 0x80;
				*keybuf_word = (chh << 16) | chl;
				keybuf_word += SIMD_COEF_32;
				break;
			}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
			/* encode a supplementary-plane char as a surrogate pair */
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else
#endif
		if (*source && len < PLAINTEXT_LENGTH) {
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead =
					opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
#if NT_FULL_UNICODE
				case 3:
					++source;
					if (*source) {
						chh <<= 6; /* BUGFIX: previously modified chl */
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
#endif
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			/* odd length: the 0x80 pad shares the last word */
			chh = 0x80;
			*keybuf_word = (chh << 16) | chl;
			keybuf_word += SIMD_COEF_32;
			break;
		}
		*keybuf_word = (chh << 16) | chl;
		keybuf_word += SIMD_COEF_32;
	}
	if (chh != 0x80 || len == 0) {
		*keybuf_word = 0x80;
		keybuf_word += SIMD_COEF_32;
	}
bailout:
	/* wipe leftovers from a previous, longer key */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* bit length (16 bits per UTF-16 unit) into MD4 input word 14 */
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
	(unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
	saved_len[index] = utf8_to_utf16(saved_key[index],
	                                 PLAINTEXT_LENGTH + 1,
	                                 (uchar*)_key,
	                                 strlen(_key)) << 1;
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(saved_key[index]);
#endif
	keys_prepared = 0;
}
/*
 * One-time setup: scale keys-per-crypt for OpenMP (scalar build only),
 * select the set_key variant matching the target encoding, allocate the
 * key/hash buffers, and (re)zero the 64K-bit (8 KB) rejection bitmap.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP) && !defined(SIMD_COEF_32)
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
my = self;
if (options.target_enc == UTF_8) {
self->methods.set_key = set_key_utf8;
self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
} else {
if (options.target_enc != ASCII &&
options.target_enc != ISO_8859_1)
self->methods.set_key = set_key_CP;
}
if (!saved_key) {
/* NOTE(review): '#if' here while the rest of the file uses '#ifdef';
   works as long as SIMD_COEF_32 is defined non-zero -- confirm. */
#if SIMD_COEF_32
saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(*saved_key) * 64, MEM_ALIGN_SIMD);
nthash = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(*nthash) * 16, MEM_ALIGN_SIMD);
#else
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
nthash = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*nthash) * 16);
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
#endif
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(unsigned short));
}
if (bitmap == NULL)
bitmap = mem_calloc_align(1, 0x10000 / 8, MEM_ALIGN_CACHE);
else
memset(bitmap, 0, 0x10000 / 8);
use_bitmap = 0; /* we did not use bitmap yet */
cmps_per_crypt = 2; /* try bitmap */
}
/* Release every per-run buffer allocated by init().  The frees are
   independent of one another, so ordering is immaterial. */
static void done(void)
{
#ifndef SIMD_COEF_32
	MEM_FREE(saved_len);
#endif
	MEM_FREE(saved_key);
	MEM_FREE(nthash);
	MEM_FREE(crypt_key);
	MEM_FREE(bitmap);
}
// Get the key back from the key buffer, from UCS-2
/*
 * Reconstruct the plaintext for 'index' from the key buffer and convert it
 * back to the target encoding.  In the SIMD path we walk the interleaved
 * buffer two UTF-16 units per 32-bit word until the 0x80 terminator
 * (0x8000 in the byte-swapped big-endian view) is found.
 */
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS_W32(0, index)];
static UTF16 key[PLAINTEXT_LENGTH + 1];
unsigned int md4_size=0;
unsigned int i=0;
for (; md4_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md4_size++)
{
#if ARCH_LITTLE_ENDIAN==1
key[md4_size] = keybuf_word[i];
key[md4_size+1] = keybuf_word[i] >> 16;
/* terminator in the low unit of this word? */
if (key[md4_size] == 0x80 && key[md4_size+1] == 0) {
key[md4_size] = 0;
break;
}
++md4_size;
/* terminator in the high unit (or buffer full)? */
if (key[md4_size] == 0x80 &&
((keybuf_word[i+SIMD_COEF_32]&0xFFFF) == 0 ||
md4_size == PLAINTEXT_LENGTH))
{
key[md4_size] = 0;
break;
}
#else
unsigned int INWORD = JOHNSWAP(keybuf_word[i]);
key[md4_size] = INWORD >> 16;
key[md4_size+1] = INWORD;
if (key[md4_size] == 0x8000 && key[md4_size+1] == 0) {
key[md4_size] = 0;
break;
}
++md4_size;
if (key[md4_size] == 0x8000 && (md4_size == PLAINTEXT_LENGTH ||
(keybuf_word[i+SIMD_COEF_32]&0xFFFF0000) == 0))
{
key[md4_size] = 0;
break;
}
#endif
}
return (char*)utf16_to_enc(key);
#else
return (char*)utf16_to_enc(saved_key[index]);
#endif
}
/*
 * Convert a canonical ciphertext into the binary layout used by cmp_*():
 *   binary[0..1]  = NT-hash bytes 14-15, recovered by brute-forcing the
 *                   third DES block of the response against the salt;
 *   binary[2..]   = the decoded response (FULL_BINARY_SIZE-2 bytes).
 * The per-hash 64K DES search is why loading is slow -- hence the one-time
 * warning suggesting the "-naive" variant for short runs.
 */
static void *get_binary(char *ciphertext)
{
static uchar *binary;
static int warned = 0, loaded = 0;
DES_cblock *challenge = my->methods.salt(ciphertext);
int i, j;
if (!binary) binary = mem_alloc_tiny(FULL_BINARY_SIZE, BINARY_ALIGN);
if (john_main_process)
if (!warned && !ldr_in_pot && !bench_running && ++loaded > 100) {
warned = 1;
fprintf(stderr, "%s: Note: slow loading. For short runs, try "
"--format=%s-naive\ninstead. That version loads "
"faster but runs slower.\n", my->params.label,
my->params.label);
}
/* locate the hex response for whichever tag/form this is */
if (chap_valid_short(ciphertext))
ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1;
else if (chap_valid_long(ciphertext))
ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1;
else /* ntlmv1 */
ciphertext = strrchr(ciphertext, '$') + 1;
for (i = 0; i < FULL_BINARY_SIZE - 2; i++) {
binary[2 + i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4;
binary[2 + i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
}
/* recover hash bytes 14-15 from the 3rd DES block (cached pair first) */
{
uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
DES_key_schedule ks;
DES_cblock b3cmp;
key[0] = valid_i; key[1] = valid_j;
setup_des_key(key, &ks);
DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
binary[0] = valid_i; binary[1] = valid_j;
goto out;
}
for (i = 0; i < 0x100; i++)
for (j = 0; j < 0x100; j++) {
key[0] = i; key[1] = j;
setup_des_key(key, &ks);
DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
binary[0] = i; binary[1] = j;
goto out;
}
}
fprintf(stderr, "Bug: %s hash with invalid 3rd block, should "
"have been rejected in valid()\n", my->params.label);
binary[0] = binary[1] = 0x55;
}
out:
return binary;
}
/*
 * Compute the NT hash (MD4 of the UTF-16 key) for every queued key, but
 * only when the key set changed (keys_prepared latch).  nthash[] receives
 * the 16-byte digest; crypt_key[] caches a 16-bit slice of it (MD4 output
 * word 7, i.e. bytes 14-15) for fast rejection, optionally mirrored into
 * the 64K-bit bitmap.  The DES response itself is only computed later, in
 * cmp_one()/cmp_exact().
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
if (!keys_prepared) {
int i = 0;
/* clear the stale bitmap: wholesale memset for large key counts,
   otherwise only the words holding bits we set last time */
if (use_bitmap) {
#if MAX_KEYS_PER_CRYPT >= 200
//#warning Notice: Using memset
memset(bitmap, 0, 0x10000 / 8);
#else
//#warning Notice: Not using memset
#ifdef SIMD_COEF_32
for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++)
#else
for (i = 0; i < count; i++)
#endif
{
unsigned int value = crypt_key[i];
bitmap[value >> 5] = 0;
}
#endif
}
/* only maintain the bitmap when cmp_all() ran often enough last round */
use_bitmap = cmps_per_crypt >= 2;
cmps_per_crypt = 0;
#ifdef SIMD_COEF_32
#if (BLOCK_LOOPS > 1)
#if defined(_OPENMP) && defined(SSE_OMP)
#pragma omp parallel for
#endif
for (i = 0; i < BLOCK_LOOPS; i++)
SIMDmd4body(&saved_key[i * NBKEYS * 64], (unsigned int*)&nthash[i * NBKEYS * 16], NULL, SSEi_MIXED_IN);
#else
SIMDmd4body(saved_key, (unsigned int*)nthash, NULL, SSEi_MIXED_IN);
#endif
/* cache the 16-bit slice (high half of MD4 output word 3) per key */
if (use_bitmap)
for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
unsigned int value;
value = *(uint32_t*)
&nthash[GETOUTPOS_W32(3, i)] >> 16;
crypt_key[i] = value;
bitmap[value >> 5] |= 1U << (value & 0x1f);
}
else
for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
crypt_key[i] = *(uint32_t*)
&nthash[GETOUTPOS_W32(3, i)] >> 16;
}
#else
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i++)
#endif
{
MD4_CTX ctx;
MD4_Init( &ctx );
MD4_Update(&ctx, saved_key[i], saved_len[i]);
MD4_Final((uchar*)&nthash[i * 16], &ctx);
/* 16-bit slice = MD4 bytes 14-15 */
crypt_key[i] = ((unsigned short*)&nthash[i * 16])[7];
if (use_bitmap) {
unsigned int value = crypt_key[i];
bitmap[value >> 5] |= 1U << (value & 0x1f);
}
}
#endif
keys_prepared = 1;
}
return count;
}
/*
 * Compare one candidate: quick 16-bit check of the cached NT-hash slice
 * against binary[0..1], then DES-encrypt the challenge with the first 7
 * bytes of this key's NT hash and compare against the first response
 * block (binary[2..9]).
 */
static int cmp_one(void *binary, int index)
{
#if ARCH_LITTLE_ENDIAN==1
if (crypt_key[index] == *(unsigned short*)binary)
#else
if ( JOHNSWAP(crypt_key[index])>>16 == *(unsigned short*)binary)
#endif
{
DES_key_schedule ks;
DES_cblock computed_binary;
unsigned int key[2];
#ifdef SIMD_COEF_32
/* gather the first 8 NT-hash bytes from the interleaved buffer */
int i;
for (i = 0; i < 2; i++)
key[i] =
#if ARCH_LITTLE_ENDIAN==1
*(uint32_t*) &nthash[GETOUTPOS_W32(i, index)];
#else
JOHNSWAP (*(uint32_t*) &nthash[GETOUTPOS_W32(i, index)]);
#endif
#else
memcpy(key, &nthash[index * 16], 8);
#endif
setup_des_key((unsigned char*)key, &ks);
DES_ecb_encrypt((DES_cblock*)challenge, &computed_binary,
&ks, DES_ENCRYPT);
return !memcmp(((char*)binary) + 2, computed_binary, 8);
}
return 0;
}
/*
 * Fast screen across all keys: consult the 64K bitmap when maintained,
 * then scan the cached 16-bit slices two entries at a time, falling back
 * to a full scan with cmp_one() only after a slice matches.
 */
static int cmp_all(void *binary, int count)
{
#if ARCH_LITTLE_ENDIAN==1
unsigned int value = *(unsigned short*)binary;
#else
unsigned int value = JOHNSWAP(*(unsigned short*)binary)>>16;
#endif
int index;
cmps_per_crypt++;
if (use_bitmap && !(bitmap[value >> 5] & (1U << (value & 0x1f))))
goto out;
#ifdef SIMD_COEF_32
/* Let's give the optimizer a hint! */
for (index = 0; index < NBKEYS * BLOCK_LOOPS; index += 2)
#else
/* NOTE(review): the stride-2 loop reads crypt_key[index + 1]; this
   presumes an even entry count -- confirm (MIN/MAX_KEYS_PER_CRYPT). */
for (index = 0; index < count; index += 2)
#endif
{
unsigned int a = crypt_key[index];
unsigned int b = crypt_key[index + 1];
#if 0
if (((a | b) & value) != value)
continue;
#endif
if (a == value || b == value)
goto thorough;
}
goto out;
thorough:
#ifdef SIMD_COEF_32
for (index = 0; index < NBKEYS * BLOCK_LOOPS; index++)
#else
for (; index < count; index++)
#endif
{
if (crypt_key[index] == value && cmp_one(binary, index))
return 1;
}
out:
return 0;
}
/*
 * Full verification: rebuild the 24-byte response from this key's NT hash
 * (zero-padded to 21 bytes and split into three 7-byte DES keys, each
 * encrypting the challenge) and compare it with the hex response embedded
 * in the canonical source string.
 */
static int cmp_exact(char *source, int index)
{
DES_key_schedule ks;
uchar binary[24];
union {
unsigned char key[24];
unsigned int Key32[6];
}k;
char *cp;
int i;
#ifdef SIMD_COEF_32
/* gather the full 16-byte NT hash (4 words) from the interleaved buffer */
for (i = 0; i < 4; i++)
k.Key32[i] =
#if ARCH_LITTLE_ENDIAN==1
*(uint32_t*)&nthash[GETOUTPOS_W32(i, index)];
#else
JOHNSWAP(*(uint32_t*)&nthash[GETOUTPOS_W32(i, index)]);
#endif
#else
memcpy(k.key, &nthash[index * 16], 16);
#endif
/* Hash is NULL padded to 21-bytes */
memset(&k.key[16], 0, 5);
/* Split into three 7-byte segments for use as DES keys
Use each key to DES encrypt challenge
Concatenate output to for 24-byte NTLM response */
setup_des_key(k.key, &ks);
DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)binary,
&ks, DES_ENCRYPT);
setup_des_key(&k.key[7], &ks);
DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[8],
&ks, DES_ENCRYPT);
setup_des_key(&k.key[14], &ks);
DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[16],
&ks, DES_ENCRYPT);
// With the normalized source we simply need to skip the
// $MSCHAPv2$hhhhhhhhhhhhhhhh$ string to get 'real' binary data.
// $NETNTLM$c75c20bff9baa71f4765f360625700b0$
cp = &source[11];
cp = strchr(cp, '$');
++cp;
/* compare hex digit pairs against the computed response */
for (i = 0; i < 24; ++i) {
unsigned char c = (atoi16[ARCH_INDEX(*cp)] << 4) +
(atoi16[ARCH_INDEX(*(cp+1))] );
if (c != binary[i])
return 0;
cp += 2;
}
return 1;
}
/* Bucket salts by the first 32 bits of the challenge. */
static int salt_hash(void *salt) { return *(uint32_t*)salt & (SALT_HASH_SIZE - 1); }
/* binary_hash_*() / get_hash_*() operate on the 16-bit NT-hash slice kept
   in binary[0..1] and crypt_key[]; on big-endian the stored short is
   byte-swapped first so both sides agree. */
#if ARCH_LITTLE_ENDIAN==1
static int binary_hash_0(void *binary) { return *(unsigned short*)binary & PH_MASK_0; }
static int binary_hash_1(void *binary) { return *(unsigned short*)binary & PH_MASK_1; }
static int binary_hash_2(void *binary) { return *(unsigned short*)binary & PH_MASK_2; }
static int binary_hash_3(void *binary) { return *(unsigned short*)binary & PH_MASK_3; }
#else
static int binary_hash_0(void *binary) { return (JOHNSWAP(*(unsigned short*)binary)>>16) & PH_MASK_0; }
static int binary_hash_1(void *binary) { return (JOHNSWAP(*(unsigned short*)binary)>>16) & PH_MASK_1; }
static int binary_hash_2(void *binary) { return (JOHNSWAP(*(unsigned short*)binary)>>16) & PH_MASK_2; }
static int binary_hash_3(void *binary) { return (JOHNSWAP(*(unsigned short*)binary)>>16) & PH_MASK_3; }
#endif
static int get_hash_0(int index) { return crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index] & PH_MASK_3; }
/* Format descriptor for MSCHAPv2 (this is the non-naive variant; see the
   loading note printed by get_binary()). */
struct fmt_main fmt_MSCHAPv2_new = {
{
CHAP_FORMAT_LABEL,
CHAP_FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#if !defined(SIMD_COEF_32) || (defined(SIMD_COEF_32) && defined(SSE_OMP))
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
{ NULL },
{ FORMAT_TAG },
chap_tests
}, {
init,
done,
fmt_default_reset,
chap_prepare,
chap_valid,
chap_split,
get_binary,
chap_get_salt,
{ NULL },
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
NULL,
NULL,
NULL
},
salt_hash,
NULL,
set_salt,
set_key_ansi,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
NULL,
NULL,
NULL
},
cmp_all,
cmp_one,
cmp_exact
}
};
/* Format descriptor for NETNTLM (NTLMv1 challenge/response), sharing the
   init/crypt/cmp machinery with the MSCHAPv2 descriptor above. */
struct fmt_main fmt_NETNTLM_new = {
{
NTLM_FORMAT_LABEL,
NTLM_FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
/* NOTE(review): this tests SIMD_PARA_MD4 where the MSCHAPv2 descriptor
   tests SIMD_COEF_32 -- confirm the difference is intentional. */
#if !defined(SIMD_COEF_32) || (defined(SIMD_PARA_MD4) && defined(SSE_OMP))
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
{ NULL },
{ FORMAT_TAGN },
ntlm_tests
}, {
init,
done,
fmt_default_reset,
ntlm_prepare,
ntlm_valid,
ntlm_split,
get_binary,
ntlm_get_salt,
{ NULL },
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
NULL,
NULL,
NULL
},
salt_hash,
NULL,
set_salt,
set_key_ansi,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
NULL,
NULL,
NULL
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bor_int64
// A.*B function (eWiseMult): GB_AemultB__bor_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bor_int64
// C+=b function (dense accum): GB_Cdense_accumb__bor_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_int64
// C=scalar+B GB_bind1st__bor_int64
// C=scalar+B' GB_bind1st_tran__bor_int64
// C=A+scalar GB_bind2nd__bor_int64
// C=A'+scalar GB_bind2nd_tran__bor_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij) | (bij)
// C, A, and B entry types for this kernel (all int64_t for BOR_INT64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
// access entry p of C's value array
#define GB_CX(p) Cx [p]
// binary operator: bitwise OR
#define GB_BINOP(z, x, y, i, j) \
z = (x) | (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)" — no CBLAS gateway for bitwise OR)
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT64 || GxB_NO_BOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled stub: C += A+B with all matrices dense is only generated for the
// arithmetic ops listed below, not for bitwise OR, so this kernel is compiled out.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the loop body comes from the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__bor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// The *_slice arrays describe the parallel task decomposition of B.
GrB_Info GB_Cdense_accumB__bor_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB_Cdense_accumb__bor_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: this second return is unreachable (the inner block returns first);
// it is an artifact of the code generator and is kept as generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled stub: C = A*D (column scale) is not generated for this operator.
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled stub: C = D*B (row scale) is not generated for this operator.
// Fix: the placeholder name was "(node)", a typo for the "(none)" placeholder
// used by every other disabled kernel in this generated file.
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Workspace cleanup used by the eWiseAdd/eWiseMult kernels below: frees the
// ek_slice task-decomposition arrays for M, A, and B.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, with the OR operator applied where both
// entries are present; the numeric work is in the included template.
GrB_Info GB_AaddB__bor_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns); the
// numeric work is in the included template, cleanup via GB_FREE_ALL above.
GrB_Info GB_AemultB__bor_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for all entries p present in B, with the scalar x bound
// as the first operand. Bb is B's bitmap (may be NULL per the GBB macro).
GrB_Info GB_bind1st__bor_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    int64_t *out = (int64_t *) Cx_output ;
    const int64_t *in = (const int64_t *) Bx_input ;
    int64_t scalar = (*((const int64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only operate on entries present in the bitmap
        if (GBB (Bb, k))
        {
            int64_t bkj = in [k] ;
            out [k] = (scalar) | (bkj) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y for all entries p present in A, with the scalar y bound
// as the second operand. Ab is A's bitmap (may be NULL per the GBB macro).
GrB_Info GB_bind2nd__bor_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    int64_t *out = (int64_t *) Cx_output ;
    const int64_t *in = (const int64_t *) Ax_input ;
    int64_t scalar = (*((const int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only operate on entries present in the bitmap
        if (GBB (Ab, k))
        {
            int64_t akj = in [k] ;
            out [k] = (akj) | (scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below; 'x' is captured from
// the enclosing function scope.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x) | (aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB_bind1st_tran__bor_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any subsequent template expansion
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined here for the bind-2nd case; 'y' is captured from
// the enclosing function scope.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij) | (y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB_bind2nd_tran__bor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
/* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2019, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP DEV implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmpdev.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v)
{
/* the identifier is a constant for this implementation; v is unused */
return SUNDIALS_NVEC_OPENMPDEV;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length)
{
N_Vector v;
N_Vector_Ops ops;
N_VectorContent_OpenMPDEV content;
/* Create vector */
v = NULL;
v = (N_Vector) malloc(sizeof *v);
if (v == NULL) return(NULL);
/* Create vector operation structure */
ops = NULL;
ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
if (ops == NULL) { free(v); return(NULL); }
ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV;
ops->nvclone = N_VClone_OpenMPDEV;
ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV;
ops->nvdestroy = N_VDestroy_OpenMPDEV;
ops->nvspace = N_VSpace_OpenMPDEV;
/* no generic array-pointer access: host/device pointers must be fetched
with the OpenMPDEV-specific getters instead */
ops->nvgetarraypointer = NULL;
ops->nvsetarraypointer = NULL;
/* standard vector operations */
ops->nvlinearsum = N_VLinearSum_OpenMPDEV;
ops->nvconst = N_VConst_OpenMPDEV;
ops->nvprod = N_VProd_OpenMPDEV;
ops->nvdiv = N_VDiv_OpenMPDEV;
ops->nvscale = N_VScale_OpenMPDEV;
ops->nvabs = N_VAbs_OpenMPDEV;
ops->nvinv = N_VInv_OpenMPDEV;
ops->nvaddconst = N_VAddConst_OpenMPDEV;
ops->nvdotprod = N_VDotProd_OpenMPDEV;
ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV;
ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV;
ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV;
ops->nvmin = N_VMin_OpenMPDEV;
ops->nvwl2norm = N_VWL2Norm_OpenMPDEV;
ops->nvl1norm = N_VL1Norm_OpenMPDEV;
ops->nvcompare = N_VCompare_OpenMPDEV;
ops->nvinvtest = N_VInvTest_OpenMPDEV;
ops->nvconstrmask = N_VConstrMask_OpenMPDEV;
ops->nvminquotient = N_VMinQuotient_OpenMPDEV;
/* fused vector operations (optional, NULL means disabled by default) */
ops->nvlinearcombination = NULL;
ops->nvscaleaddmulti = NULL;
ops->nvdotprodmulti = NULL;
/* vector array operations (optional, NULL means disabled by default) */
ops->nvlinearsumvectorarray = NULL;
ops->nvscalevectorarray = NULL;
ops->nvconstvectorarray = NULL;
ops->nvwrmsnormvectorarray = NULL;
ops->nvwrmsnormmaskvectorarray = NULL;
ops->nvscaleaddmultivectorarray = NULL;
ops->nvlinearcombinationvectorarray = NULL;
/* Create content; data pointers stay NULL until a sized constructor or
N_VMake attaches them */
content = NULL;
content = (N_VectorContent_OpenMPDEV) malloc(sizeof(struct _N_VectorContent_OpenMPDEV));
if (content == NULL) { free(ops); free(v); return(NULL); }
content->length = length;
content->own_data = SUNFALSE;
content->host_data = NULL;
content->dev_data = NULL;
/* Attach content and ops */
v->content = content;
v->ops = ops;
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
/* ----------------------------------------------------------------------------
 * Function to create a new vector, allocating its host and device data.
 * Returns NULL (freeing all partial allocations) on any allocation failure.
 * Fixes vs. original: the device allocation result was never checked, and a
 * failed host malloc leaked any successfully allocated device buffer.
 */
N_Vector N_VNew_OpenMPDEV(sunindextype length)
{
N_Vector v;
realtype *data;
realtype *dev_data;
int dev;
v = N_VNewEmpty_OpenMPDEV(length);
if (v == NULL) return(NULL);
/* Create data */
if (length > 0) {
/* Allocate memory on host */
data = (realtype *) malloc(length * sizeof(realtype));
if (data == NULL) { N_VDestroy_OpenMPDEV(v); return(NULL); }
/* Allocate memory on device */
dev = omp_get_default_device();
dev_data = omp_target_alloc(length * sizeof(realtype), dev);
if (dev_data == NULL) { free(data); N_VDestroy_OpenMPDEV(v); return(NULL); }
/* Attach data */
NV_OWN_DATA_OMPDEV(v) = SUNTRUE;
NV_DATA_HOST_OMPDEV(v) = data;
NV_DATA_DEV_OMPDEV(v) = dev_data;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
/* ----------------------------------------------------------------------------
 * Function to create a vector wrapping user-supplied host and device data.
 * The vector does not take ownership of either buffer (own_data = SUNFALSE).
 * Fix vs. original: removed the 'dev'/'host' locals, which were computed via
 * omp_get_default_device()/omp_get_initial_device() but never used.
 */
N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata, realtype *d_vdata)
{
N_Vector v;
if (h_vdata == NULL || d_vdata == NULL) return(NULL);
v = N_VNewEmpty_OpenMPDEV(length);
if (v == NULL) return(NULL);
if (length > 0) {
/* Attach data; caller retains ownership of both buffers */
NV_OWN_DATA_OMPDEV(v) = SUNFALSE;
NV_DATA_HOST_OMPDEV(v) = h_vdata;
NV_DATA_DEV_OMPDEV(v) = d_vdata;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
/* ----------------------------------------------------------------------------
 * Function to create an array of new vectors.
 * Fix vs. original: on a clone failure at index j, the cleanup destroyed only
 * j-1 vectors, leaking vs[j-1]; it must destroy all j already-created vectors.
 */
N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w)
{
N_Vector *vs;
int j;
if (count <= 0) return(NULL);
vs = (N_Vector *) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = N_VClone_OpenMPDEV(w);
if (vs[j] == NULL) {
/* destroy vs[0..j-1] (j vectors) and free the array itself */
N_VDestroyVectorArray_OpenMPDEV(vs, j);
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
/* ----------------------------------------------------------------------------
 * Function to create an array of new vectors with NULL data arrays.
 * Fix vs. original: on a clone failure at index j, the cleanup destroyed only
 * j-1 vectors, leaking vs[j-1]; it must destroy all j already-created vectors.
 */
N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w)
{
N_Vector *vs;
int j;
if (count <= 0) return(NULL);
vs = (N_Vector *) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = N_VCloneEmpty_OpenMPDEV(w);
if (vs[j] == NULL) {
/* destroy vs[0..j-1] (j vectors) and free the array itself */
N_VDestroyVectorArray_OpenMPDEV(vs, j);
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMPDEV
*/
void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count)
{
int j;
/* destroy vs[0..count-1], then free the array itself */
for (j = 0; j < count; j++) N_VDestroy_OpenMPDEV(vs[j]);
free(vs); vs = NULL; /* NULLing the local parameter has no effect on the caller */
return;
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMPDEV(N_Vector v)
{
/* length stored in the vector's content structure */
return NV_LENGTH_OMPDEV(v);
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the host.
*/
realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v)
{
/* raw pointer to the host-side data array */
return((realtype *) NV_DATA_HOST_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the device.
*/
realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v)
{
/* raw pointer to the device-side data array (not dereferenceable on host) */
return((realtype *) NV_DATA_DEV_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMPDEV(N_Vector x)
{
/* convenience wrapper: print to stdout */
N_VPrintFile_OpenMPDEV(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile)
{
sunindextype i, N;
realtype *xd;
xd = NULL;
N = NV_LENGTH_OMPDEV(x);
/* NOTE(review): this prints the HOST copy only; callers that want current
device values must call N_VCopyFromDevice_OpenMPDEV first */
xd = NV_DATA_HOST_OMPDEV(x);
for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
STAN_SUNDIALS_FPRINTF(outfile, "%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#else
STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#endif
}
STAN_SUNDIALS_FPRINTF(outfile, "\n");
return;
}
/* ----------------------------------------------------------------------------
* Function to copy host array into device array
*/
void N_VCopyToDevice_OpenMPDEV(N_Vector x)
{
int dev, host;
sunindextype length;
realtype *host_ptr;
realtype *dev_ptr;
/* Get array information */
length = NV_LENGTH_OMPDEV(x);
host_ptr = NV_DATA_HOST_OMPDEV(x);
dev_ptr = NV_DATA_DEV_OMPDEV(x);
/* Get device and host identifiers */
dev = omp_get_default_device();
host = omp_get_initial_device();
/* Copy array from host to host; return value of omp_target_memcpy is ignored
(NOTE(review): a nonzero return indicates copy failure — consider checking) */
omp_target_memcpy(dev_ptr, host_ptr, sizeof(realtype) * length, 0, 0, dev, host);
return;
}
/* ----------------------------------------------------------------------------
* Function to copy device array into host array
*/
void N_VCopyFromDevice_OpenMPDEV(N_Vector x)
{
int dev, host;
sunindextype length;
realtype *host_ptr;
realtype *dev_ptr;
/* Get array information */
length = NV_LENGTH_OMPDEV(x);
host_ptr = NV_DATA_HOST_OMPDEV(x);
dev_ptr = NV_DATA_DEV_OMPDEV(x);
/* Get device and host identifiers */
dev = omp_get_default_device();
host = omp_get_initial_device();
/* Copy array from device to host; omp_target_memcpy's destination device id
comes first (host here), then the source (dev) */
omp_target_memcpy(host_ptr, dev_ptr, sizeof(realtype) * length, 0, 0, host, dev);
return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w)
{
N_Vector v;
N_Vector_Ops ops;
N_VectorContent_OpenMPDEV content;
if (w == NULL) return(NULL);
/* Create vector */
v = NULL;
v = (N_Vector) malloc(sizeof *v);
if (v == NULL) return(NULL);
/* Create vector operation structure, copying every op pointer from w so the
clone dispatches identically to its template */
ops = NULL;
ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
if (ops == NULL) { free(v); return(NULL); }
ops->nvgetvectorid = w->ops->nvgetvectorid;
ops->nvclone = w->ops->nvclone;
ops->nvcloneempty = w->ops->nvcloneempty;
ops->nvdestroy = w->ops->nvdestroy;
ops->nvspace = w->ops->nvspace;
ops->nvgetarraypointer = w->ops->nvgetarraypointer;
ops->nvsetarraypointer = w->ops->nvsetarraypointer;
/* standard vector operations */
ops->nvlinearsum = w->ops->nvlinearsum;
ops->nvconst = w->ops->nvconst;
ops->nvprod = w->ops->nvprod;
ops->nvdiv = w->ops->nvdiv;
ops->nvscale = w->ops->nvscale;
ops->nvabs = w->ops->nvabs;
ops->nvinv = w->ops->nvinv;
ops->nvaddconst = w->ops->nvaddconst;
ops->nvdotprod = w->ops->nvdotprod;
ops->nvmaxnorm = w->ops->nvmaxnorm;
ops->nvwrmsnormmask = w->ops->nvwrmsnormmask;
ops->nvwrmsnorm = w->ops->nvwrmsnorm;
ops->nvmin = w->ops->nvmin;
ops->nvwl2norm = w->ops->nvwl2norm;
ops->nvl1norm = w->ops->nvl1norm;
ops->nvcompare = w->ops->nvcompare;
ops->nvinvtest = w->ops->nvinvtest;
ops->nvconstrmask = w->ops->nvconstrmask;
ops->nvminquotient = w->ops->nvminquotient;
/* fused vector operations */
ops->nvlinearcombination = w->ops->nvlinearcombination;
ops->nvscaleaddmulti = w->ops->nvscaleaddmulti;
ops->nvdotprodmulti = w->ops->nvdotprodmulti;
/* vector array operations */
ops->nvlinearsumvectorarray = w->ops->nvlinearsumvectorarray;
ops->nvscalevectorarray = w->ops->nvscalevectorarray;
ops->nvconstvectorarray = w->ops->nvconstvectorarray;
ops->nvwrmsnormvectorarray = w->ops->nvwrmsnormvectorarray;
ops->nvwrmsnormmaskvectorarray = w->ops->nvwrmsnormmaskvectorarray;
ops->nvscaleaddmultivectorarray = w->ops->nvscaleaddmultivectorarray;
ops->nvlinearcombinationvectorarray = w->ops->nvlinearcombinationvectorarray;
/* Create content: same length as w, but no data attached */
content = NULL;
content = (N_VectorContent_OpenMPDEV) malloc(sizeof(struct _N_VectorContent_OpenMPDEV));
if (content == NULL) { free(ops); free(v); return(NULL); }
content->length = NV_LENGTH_OMPDEV(w);
content->own_data = SUNFALSE;
content->host_data = NULL;
content->dev_data = NULL;
/* Attach content and ops */
v->content = content;
v->ops = ops;
return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
/* ----------------------------------------------------------------------------
 * Create new vector from existing vector and attach freshly allocated data.
 * Returns NULL (freeing all partial allocations) on any allocation failure.
 * Fixes vs. original: the device allocation result was never checked, and a
 * failed host malloc leaked any successfully allocated device buffer.
 */
N_Vector N_VClone_OpenMPDEV(N_Vector w)
{
N_Vector v;
realtype *data;
realtype *dev_data;
sunindextype length;
int dev;
v = N_VCloneEmpty_OpenMPDEV(w);
if (v == NULL) return(NULL);
length = NV_LENGTH_OMPDEV(w);
/* Create data */
if (length > 0) {
/* Allocate memory on host */
data = (realtype *) malloc(length * sizeof(realtype));
if (data == NULL) { N_VDestroy_OpenMPDEV(v); return(NULL); }
/* Allocate memory on device */
dev = omp_get_default_device();
dev_data = omp_target_alloc(length * sizeof(realtype), dev);
if (dev_data == NULL) { free(data); N_VDestroy_OpenMPDEV(v); return(NULL); }
/* Attach data */
NV_OWN_DATA_OMPDEV(v) = SUNTRUE;
NV_DATA_HOST_OMPDEV(v) = data;
NV_DATA_DEV_OMPDEV(v) = dev_data;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
void N_VDestroy_OpenMPDEV(N_Vector v)
{
int dev;
/* data buffers are freed only if this vector owns them (see N_VMake) */
if (NV_OWN_DATA_OMPDEV(v) == SUNTRUE) {
/* Free host memory */
free(NV_DATA_HOST_OMPDEV(v));
NV_DATA_HOST_OMPDEV(v) = NULL;
/* Free device memory */
dev = omp_get_default_device();
omp_target_free(NV_DATA_DEV_OMPDEV(v), dev);
NV_DATA_DEV_OMPDEV(v) = NULL;
}
free(v->content); v->content = NULL;
free(v->ops); v->ops = NULL;
free(v); v = NULL;
return;
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
/* storage: 'length' realtype words and one integer word (the length field) */
*lrw = NV_LENGTH_OMPDEV(v);
*liw = 1;
return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype c, *xd_dev, *yd_dev, *zd_dev;
N_Vector v1, v2;
booleantype test;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
/* The special cases below must be tested in this order: in-place axpy forms
first, then coefficient patterns from most to least specific */
if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
Vaxpy_OpenMPDEV(a,x,y);
return;
}
if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
Vaxpy_OpenMPDEV(b,y,x);
return;
}
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE)) {
VSum_OpenMPDEV(x, y, z);
return;
}
/* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
v1 = test ? y : x;
v2 = test ? x : y;
VDiff_OpenMPDEV(v2, v1, z);
return;
}
/* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin1_OpenMPDEV(c, v1, v2, z);
return;
}
/* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin2_OpenMPDEV(c, v1, v2, z);
return;
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b) {
VScaleSum_OpenMPDEV(a, x, y, z);
return;
}
/* Case: a == -b */
if (a == -b) {
VScaleDiff_OpenMPDEV(a, x, y, z);
return;
}
/* Do all cases not handled above:
(1) a == other, b == 0.0 - user should have called N_VScale
(2) a == 0.0, b == other - user should have called N_VScale
(3) a,b == other, a !=b, a != -b */
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* general case runs on the device over the device-side arrays */
#pragma omp target map(to:N,a,b) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]);
return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
void N_VConst_OpenMPDEV(realtype c, N_Vector z)
{
sunindextype i, N;
realtype *zd_dev;
int dev;
zd_dev = NULL;
N = NV_LENGTH_OMPDEV(z);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* fill runs on the device; the host copy is NOT updated */
#pragma omp target map(to:N,c) is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++) zd_dev[i] = c;
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* elementwise product on the device-side arrays */
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]*yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* elementwise division; caller must guarantee y has no zero entries */
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]/yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scaler multiplication z[i] = c*x[i]
*/
void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
if (z == x) { /* BLAS usage: scale x <- cx */
VScaleBy_OpenMPDEV(c, x);
return;
}
/* dispatch the c == +/-1 special cases to copy/negate helpers */
if (c == ONE) {
VCopy_OpenMPDEV(x, z);
} else if (c == -ONE) {
VNeg_OpenMPDEV(x, z);
} else {
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N,c) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = c*xd_dev[i];
}
return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z)
{
  realtype *xd, *zd;
  sunindextype k, len;
  int devid;
  /* device-resident data pointers and length */
  len = NV_LENGTH_OMPDEV(x);
  xd  = NV_DATA_DEV_OMPDEV(x);
  zd  = NV_DATA_DEV_OMPDEV(z);
  /* launch the elementwise absolute-value kernel on the default device */
  devid = omp_get_default_device();
#pragma omp target map(to:len) is_device_ptr(xd, zd) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zd[k] = SUNRabs(xd[k]);
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
void N_VInv_OpenMPDEV(N_Vector x, N_Vector z)
{
  realtype *xd, *zd;
  sunindextype k, len;
  int devid;
  /* device-resident data pointers and length */
  len = NV_LENGTH_OMPDEV(x);
  xd  = NV_DATA_DEV_OMPDEV(x);
  zd  = NV_DATA_DEV_OMPDEV(z);
  /* launch the elementwise reciprocal kernel on the default device
     (no zero check here; use N_VInvTest for the checked variant) */
  devid = omp_get_default_device();
#pragma omp target map(to:len) is_device_ptr(xd, zd) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zd[k] = ONE / xd[k];
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
*/
void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z)
{
  realtype *xd, *zd;
  sunindextype k, len;
  int devid;
  /* device-resident data pointers and length */
  len = NV_LENGTH_OMPDEV(x);
  xd  = NV_DATA_DEV_OMPDEV(x);
  zd  = NV_DATA_DEV_OMPDEV(z);
  /* launch the shift-by-scalar kernel on the default device */
  devid = omp_get_default_device();
#pragma omp target map(to:len,b) is_device_ptr(xd, zd) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zd[k] = xd[k] + b;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
/* Dot product a = sum_i x[i]*y[i], reduced on the device and returned
   to the host through map(tofrom:sum). */
realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *yd_dev;
  int dev;
  xd_dev = yd_dev = NULL;
  sum = ZERO;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += xd_dev[i]*yd_dev[i];
  }
  return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
/* Max norm: max_i |x[i]|, computed with a device-side max reduction.
   Initializing max to ZERO on the host is safe because |x[i]| >= 0. */
realtype N_VMaxNorm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype max, *xd_dev;
  int dev;
  max = ZERO;
  xd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:max) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1)
  for (i = 0; i < N; i++) {
    max = SUNMAX(SUNRabs(xd_dev[i]), max);
  }
  return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
/* Weighted RMS norm: sqrt( (1/N) * sum_i (x[i]*w[i])^2 ).
   The squared terms are summed on the device; the divide and sqrt
   happen on the host after the target region completes. */
realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;
  sum = ZERO;
  xd_dev = wd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }
  return(SUNRsqrt(sum/N));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
/* Masked weighted RMS norm: like N_VWrmsNorm, but only entries whose
   mask value id[i] > 0 contribute to the sum.  Note the divisor is the
   full length N, not the number of unmasked entries. */
realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev, *idd_dev;
  int dev;
  sum = ZERO;
  xd_dev = wd_dev = idd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);
  idd_dev = NV_DATA_DEV_OMPDEV(id);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    if (idd_dev[i] > ZERO) {
      sum += SUNSQR(xd_dev[i]*wd_dev[i]);
    }
  }
  return(SUNRsqrt(sum / N));
}
/* ----------------------------------------------------------------------------
* Finds the minimun component of a vector
*/
/* Minimum component of x.  Unlike the other reductions, min cannot be
   seeded with a neutral host-side value, so num_teams(1) forces a single
   team: one team initializes min = x[0] on the device, then the inner
   distribute parallel for reduces over the remaining entries.  The
   result only travels device->host (map(from:min)). */
realtype N_VMin_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype min, *xd_dev;
  int dev;
  xd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(from:min) is_device_ptr(xd_dev) device(dev)
#pragma omp teams num_teams(1)
  {
    min = xd_dev[0];
#pragma omp distribute parallel for reduction(min:min) schedule(static, 1)
    for (i = 1; i < N; i++) {
      min = SUNMIN(xd_dev[i], min);
    }
  }
  return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
/* Weighted L2 norm: sqrt( sum_i (x[i]*w[i])^2 ).  Same device-side sum
   as N_VWrmsNorm but without the 1/N scaling. */
realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;
  sum = ZERO;
  xd_dev = wd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }
  return(SUNRsqrt(sum));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
/* L1 norm: sum_i |x[i]|, reduced on the device. */
realtype N_VL1Norm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype sum, *xd_dev;
  int dev;
  sum = ZERO;
  xd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i<N; i++)
    sum += SUNRabs(xd_dev[i]);
  return(sum);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scaler
*/
void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  realtype *xd, *zd;
  sunindextype k, len;
  int devid;
  /* device-resident data pointers and length */
  len = NV_LENGTH_OMPDEV(x);
  xd  = NV_DATA_DEV_OMPDEV(x);
  zd  = NV_DATA_DEV_OMPDEV(z);
  /* mark each component: z[k] = 1 where |x[k]| >= c, otherwise 0 */
  devid = omp_get_default_device();
#pragma omp target map(to:len,c) is_device_ptr(xd, zd) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++) {
    if (SUNRabs(xd[k]) >= c)
      zd[k] = ONE;
    else
      zd[k] = ZERO;
  }
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
*/
/* Checked componentwise inverse: z[i] = 1/x[i] for all nonzero x[i].
   A max reduction on the flag 'val' records whether any x[i] == ZERO
   was encountered; in that case the corresponding z[i] is left
   unwritten and SUNFALSE is returned. */
booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev, val;
  int dev;
  xd_dev = zd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
  val = ZERO;
#pragma omp target map(to:N) map(tofrom:val) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:val) schedule(static, 1)
  for (i = 0; i < N; i++) {
    if (xd_dev[i] == ZERO)
      val = ONE;
    else
      zd_dev[i] = ONE/xd_dev[i];
  }
  /* val > 0 means at least one zero entry was found */
  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
/* Constraint test.  For each i, c[i] encodes the constraint on x[i]:
   |c[i]| > 1.5 (i.e. +/-2) requires x[i]*c[i] >  0 (strict sign match),
   |c[i]| > 0.5 (i.e. +/-1) requires x[i]*c[i] >= 0,
   c[i] == 0 means unconstrained.
   m[i] is set to 1 where the constraint is violated, 0 otherwise.  The
   min reduction on 'temp' drops it to ZERO on any violation; returns
   SUNTRUE iff all constraints hold. */
booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd_dev, *xd_dev, *md_dev;
  int dev;
  cd_dev = xd_dev = md_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  cd_dev = NV_DATA_DEV_OMPDEV(c);
  md_dev = NV_DATA_DEV_OMPDEV(m);
  /* get default device identifier */
  dev = omp_get_default_device();
  temp = ONE;
#pragma omp target map(to:N) map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1)
  for (i = 0; i < N; i++) {
    md_dev[i] = ZERO;
    if (cd_dev[i] == ZERO) continue;
    if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5) {
      if ( xd_dev[i]*cd_dev[i] <= ZERO) { temp = ZERO; md_dev[i] = ONE; }
      continue;
    }
    if ( cd_dev[i] > HALF || cd_dev[i] < -HALF) {
      if (xd_dev[i]*cd_dev[i] < ZERO ) { temp = ZERO; md_dev[i] = ONE; }
    }
  }
  if (temp == ONE) return (SUNTRUE);
  else return(SUNFALSE);
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
/* Minimum componentwise quotient: min over i of num[i]/denom[i],
   skipping entries where denom[i] == 0.  Returns BIG_REAL when every
   denominator is zero (the reduction identity is never beaten). */
realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom)
{
  sunindextype i, N;
  realtype *nd_dev, *dd_dev, min;
  int dev;
  nd_dev = dd_dev = NULL;
  N = NV_LENGTH_OMPDEV(num);
  nd_dev = NV_DATA_DEV_OMPDEV(num);
  dd_dev = NV_DATA_DEV_OMPDEV(denom);
  /* get default device identifier */
  dev = omp_get_default_device();
  min = BIG_REAL;
#pragma omp target map(to:N) map(tofrom:min) is_device_ptr(nd_dev, dd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:min) schedule(static, 1)
  for (i = 0; i < N; i++)
    if (dd_dev[i] != ZERO) min = SUNMIN(nd_dev[i]/dd_dev[i], min);
  return(min);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
/* Linear combination z = sum_{i=0}^{nvec-1} c[i]*X[i].
 * nvec == 1 and nvec == 2 are dispatched to N_VScale / N_VLinearSum.
 * Three device paths are used, matching the aliasing of X[0] and z.
 * Returns 0 on success, -1 for nvec < 1.
 *
 * Fixes relative to the previous revision:
 *  - the two target regions in the X[0]==z paths computed 'dev' but
 *    omitted device(dev), so they could run on a different device than
 *    every other kernel in this file;
 *  - 'to_add' was shared among the threads of each inner parallel for,
 *    a data race; it is now private(to_add);
 *  - 'teams distribute' must bind directly to its for loop, not to a
 *    compound statement (OpenMP worksharing-loop requirement), and
 *    xd_dev is assigned per team iteration, so it is privatized. */
int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int i, dev;
  realtype to_add; /* per-thread temporary for the atomic update */
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], z);
    return(0);
  }
  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z);
    return(0);
  }
  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store X dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev)
    for (i=1; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1) private(to_add)
      for (j=0; j<N; j++) {
        to_add = c[i] * xd_dev[j];
#pragma omp atomic
        zd_dev[j] += to_add;
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }
  /*
   * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
   */
  if (X[0] == z) {
    /* first scale the accumulator in place by c[0] */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
    {
#pragma omp teams distribute parallel for schedule(static,1)
      for (j=0; j<N; j++)
        zd_dev[j] *= c[0];
    }
    /* then accumulate the remaining scaled vectors */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev)
    for (i=1; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1) private(to_add)
      for (j=0; j<N; j++) {
        to_add = c[i] * xd_dev[j];
#pragma omp atomic
        zd_dev[j] += to_add;
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }
  /*
   * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
   */
  xd_dev = NV_DATA_DEV_OMPDEV(X[0]);
#pragma omp target map(to:N,c[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
  {
#pragma omp teams distribute parallel for schedule(static, 1)
    for (j=0; j<N; j++) {
      zd_dev[j] = c[0] * xd_dev[j];
    }
  }
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev)
  for (i=1; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1) private(to_add)
    for (j=0; j<N; j++) {
      to_add = c[i] * xd_dev[j];
#pragma omp atomic
      zd_dev[j] += to_add;
    }
  }
  free(xd_dev_ptrs);
  return(0);
}
/* Scale-and-add for nvec vectors: Z[i] = a[i]*x + Y[i].  When Y == Z
   the update is done in place (Y[i] += a[i]*x).  Host arrays of device
   data pointers are mapped to the device so each team can pick its
   vector.  Returns 0 on success, -1 for nvec < 1. */
int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]);
    return(0);
  }
  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          yd_dev[j] += a[i] * xd_dev[j];
      }
    }
    free(yd_dev_ptrs);
    return(0);
  }
  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j];
    }
  }
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Multiple dot products: dotprods[i] = sum_j x[j]*Y[i][j].  Each team
   handles one vector i; the inner parallel for reduces into 'sum'.
   NOTE(review): 'sum' and 'yd_dev' are function-scope scalars assigned
   inside the teams region; whether each team gets a private copy is
   implementation-sensitive — confirm against the targeted compilers.
   Returns 0 on success, -1 for nvec < 1. */
int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype** yd_dev_ptrs=NULL;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  /* should have called N_VDotProd */
  if (nvec == 1) {
    dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]);
    return(0);
  }
  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* initialize dot products */
  for (i=0; i<nvec; i++) {
    dotprods[i] = ZERO;
  }
  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  /* compute multiple dot products */
#pragma omp target map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    yd_dev = yd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++)
      sum += xd_dev[j] * yd_dev[j];
    dotprods[i] += sum;
  }
  free(yd_dev_ptrs);
  return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
/* Vector-array linear sum: Z[i] = a*X[i] + b*Y[i] for i = 0..nvec-1.
   Mirrors the scalar N_VLinearSum dispatch: every special combination
   of a and b (axpy, sum, difference, single-scale forms) is routed to
   a dedicated private kernel; only the fully general case launches the
   target region at the bottom.  Returns 0 on success, -1 for nvec < 1. */
int N_VLinearSumVectorArray_OpenMPDEV(int nvec,
                                   realtype a, N_Vector* X,
                                   realtype b, N_Vector* Y,
                                   N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  N_Vector* V1;
  N_Vector* V2;
  booleantype test;
  realtype c;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]);
    return(0);
  }
  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y));
  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X));
  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z));
  /* Cases: */
  /* (1) a == 1.0, b = -1.0, */
  /* (2) a == -1.0, b == 1.0 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z));
  }
  /* Cases: */
  /* (1) a == 1.0, b == other or 0.0, */
  /* (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin1VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }
  /* Cases: */
  /* (1) a == -1.0, b != 1.0, */
  /* (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }
  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMPDEV(nvec, a, X, Y, Z));
  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMPDEV(nvec, a, X, Y, Z));
  /* Do all cases not handled above: */
  /* (1) a == other, b == 0.0 - user should have called N_VScale */
  /* (2) a == 0.0, b == other - user should have called N_VScale */
  /* (3) a,b == other, a !=b, a != -b */
  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
  /* compute linear sum for each vector pair in vector arrays */
#pragma omp target map(to:N,nvec,a,b,xd_dev_ptrs[:nvec], yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a * xd_dev[j] + b * yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Scale a vector array: Z[i] = c[i]*X[i].  The X == Z aliasing case is
   handled in place; each team processes one vector via the mapped
   arrays of device pointers.  Returns 0 on success, -1 for nvec < 1. */
int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], Z[0]);
    return(0);
  }
  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  }
  /*
   * X[i] *= c[i]
   */
  if (X == Z) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          xd_dev[j] *= c[i];
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }
  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
  /*
   * Z[i] = c[i] * X[i]
   */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c[i] * xd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Set every component of every vector in the array to the constant c.
   Returns 0 on success, -1 for nvec < 1. */
int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  /* should have called N_VConst */
  if (nvec == 1) {
    N_VConst_OpenMPDEV(c, Z[0]);
    return(0);
  }
  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);
  /* get device */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
  /* set each vector in the vector array to a constant */
#pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c;
    }
  }
  free(zd_dev_ptrs);
  return(0);
}
/* WRMS norms of a vector array: nrm[i] = sqrt( (1/N) * sum_j
 * (X[i][j]*W[i][j])^2 ).  One team per vector; the inner parallel for
 * performs the sum reduction.  Returns 0 on success, -1 for nvec < 1.
 *
 * Fixes relative to the previous revision: a worksharing-loop directive
 * ('parallel for', 'teams distribute') must bind directly to its for
 * loop — the inner loop was wrapped in a compound statement, which does
 * not compile; and 'sum'/'xd_dev'/'wd_dev' were shared across teams
 * (a data race), so they are now privatized on the distribute. */
int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]);
    return(0);
  }
  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* initialize norms */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;
  /* Allocate and store dev pointers to copy to device */
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  /* compute the WRMS norm for each vector in the vector array */
#pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
  is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute private(sum, xd_dev, wd_dev)
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    wd_dev = wd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++)
      sum += SUNSQR(xd_dev[j] * wd_dev[j]);
    nrm[i] = SUNRsqrt(sum/N);
  }
  free(wd_dev_ptrs);
  free(xd_dev_ptrs);
  return(0);
}
/* Masked WRMS norms of a vector array: like N_VWrmsNormVectorArray but
 * only entries with id[j] > 0 contribute; the divisor stays N.  One
 * team per vector; the inner parallel for performs the sum reduction.
 * Returns 0 on success, -1 for nvec < 1.
 *
 * Fixes relative to the previous revision: a worksharing-loop directive
 * must bind directly to its for loop — the inner loop was wrapped in a
 * compound statement, which does not compile; and 'sum'/'xd_dev'/
 * 'wd_dev' were shared across teams (a data race), so they are now
 * privatized on the distribute. */
int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W,
                                      N_Vector id, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype* idd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  /* should have called N_VWrmsNormMask */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id);
    return(0);
  }
  /* get vector length and mask data array */
  N = NV_LENGTH_OMPDEV(X[0]);
  idd_dev = NV_DATA_DEV_OMPDEV(id);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* initialize norms */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);
  /* compute the masked WRMS norm for each vector in the vector array */
#pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
  is_device_ptr(idd_dev,xd_dev,wd_dev) device(dev)
#pragma omp teams distribute private(sum, xd_dev, wd_dev)
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    wd_dev = wd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++) {
      if (idd_dev[j] > ZERO)
        sum += SUNSQR(xd_dev[j] * wd_dev[j]);
    }
    nrm[i] = SUNRsqrt(sum/N);
  }
  free(xd_dev_ptrs);
  free(wd_dev_ptrs);
  return(0);
}
/* Scale-and-add across a 2-D set of vectors:
   Z[j][i] = a[j]*X[i] + Y[j][i] for i = 0..nvec-1, j = 0..nsum-1,
   updated in place when Y == Z.  Small cases fall through to
   N_VLinearSum / N_VScaleAddMulti / N_VLinearSumVectorArray.  The 2-D
   pointer sets are flattened row-major (index i*nsum+j) into host
   arrays of device pointers.  Returns 0 on success, -1 on bad counts. */
int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a,
                                        N_Vector* X, N_Vector** Y, N_Vector** Z)
{
  int i, j, dev;
  sunindextype k, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  int retval;
  N_Vector* YY;
  N_Vector* ZZ;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);
  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */
  if (nvec == 1) {
    /* should have called N_VLinearSum */
    if (nsum == 1) {
      N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]);
      return(0);
    }
    /* should have called N_VScaleAddMulti */
    YY = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (j=0; j<nsum; j++) {
      YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }
    retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ);
    free(YY);
    free(ZZ);
    return(retval);
  }
  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */
  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }
  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */
  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]);
  }
  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        for (j=0; j<nsum; j++) {
          yd_dev = yd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            yd_dev[k] += a[j] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }
  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]);
  }
  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      for (j=0; j<nsum; j++) {
        yd_dev = yd_dev_ptrs[i*nsum+j];
        zd_dev = zd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k];
      }
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Linear combination across a vector array:
   Z[j] = sum_{i=0}^{nsum-1} c[i]*X[i][j] for j = 0..nvec-1.
   Small cases dispatch to N_VScale / N_VLinearSum /
   N_VLinearCombination and the vector-array variants.  Like the other
   fused kernels, 2-D device pointers are flattened row-major
   (index j*nsum+i) into a host array mapped to the device.  Three
   device paths handle the possible aliasing of X[0] and Z.
   NOTE(review): the last two target regions list only zd_dev in
   is_device_ptr even though xd_dev is also dereferenced; xd_dev is
   assigned inside the region from the mapped pointer table, so this
   looks intentional — confirm against the targeted compilers.
   Returns 0 on success, -1 on bad counts. */
int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum,
                                           realtype* c,
                                           N_Vector** X,
                                           N_Vector* Z)
{
  int i; /* vector arrays index in summation [0,nsum) */
  int j; /* vector index in vector array [0,nvec) */
  sunindextype k; /* element index in vector [0,N) */
  sunindextype N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;
  int dev;
  realtype* ctmp;
  N_Vector* Y;
  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);
  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */
  if (nvec == 1) {
    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]);
      return(0);
    }
    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }
    /* should have called N_VLinearCombination */
    Y = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }
    N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]);
    free(Y);
    return(0);
  }
  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */
  /* should have called N_VScaleVectorArray */
  if (nsum == 1) {
    ctmp = (realtype*) malloc(nvec * sizeof(realtype));
    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }
    N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z);
    free(ctmp);
    return(0);
  }
  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMPDEV(nvec, c[0], X[0], c[1], X[1], Z);
    return(0);
  }
  /* --------------------------
   * Compute linear combination
   * -------------------------- */
  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (j=0; j<nvec; j++)
    zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]);
  for (j=0; j<nvec; j++) {
    for (i=0; i<nsum; i++)
      xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]);
  }
  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nvec-1
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }
  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nvec-1
   */
  if (X[0] == Z) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] *= c[0];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }
  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nvec-1
   */
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (j=0; j<nvec; j++) {
      /* scale first vector in the sum into the output vector */
      xd_dev = xd_dev_ptrs[j*nsum];
      zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] = c[0] * xd_dev[k];
      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] += c[i] * xd_dev[k];
      }
    }
  }
  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/*
* -----------------------------------------------------------------
* private functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Copy vector components into a second vector
*/
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *src, *dst;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  src = NV_DATA_DEV_OMPDEV(x);
  dst = NV_DATA_DEV_OMPDEV(z);

  /* launch the copy kernel on the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:len) is_device_ptr(src, dst) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    dst[k] = src[k];
}
/* ----------------------------------------------------------------------------
* Compute vector sum
*/
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  xa  = NV_DATA_DEV_OMPDEV(x);
  ya  = NV_DATA_DEV_OMPDEV(y);
  za  = NV_DATA_DEV_OMPDEV(z);

  /* elementwise sum z = x + y on the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:len) is_device_ptr(xa, ya, za) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    za[k] = xa[k] + ya[k];
}
/* ----------------------------------------------------------------------------
* Compute vector difference
*/
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  xa  = NV_DATA_DEV_OMPDEV(x);
  ya  = NV_DATA_DEV_OMPDEV(y);
  za  = NV_DATA_DEV_OMPDEV(z);

  /* elementwise difference z = x - y on the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:len) is_device_ptr(xa, ya, za) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    za[k] = xa[k] - ya[k];
}
/* ----------------------------------------------------------------------------
* Compute the negative of a vector
*/
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *src, *dst;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  src = NV_DATA_DEV_OMPDEV(x);
  dst = NV_DATA_DEV_OMPDEV(z);

  /* negate each component, z = -x, on the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:len) is_device_ptr(src, dst) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    dst[k] = -src[k];
}
/* ----------------------------------------------------------------------------
* Compute scaled vector sum
*/
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  xa  = NV_DATA_DEV_OMPDEV(x);
  ya  = NV_DATA_DEV_OMPDEV(y);
  za  = NV_DATA_DEV_OMPDEV(z);

  /* scaled sum z = c*(x + y) on the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:len,c) is_device_ptr(xa, ya, za) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    za[k] = c*(xa[k]+ya[k]);
}
/* ----------------------------------------------------------------------------
* Compute scaled vector difference
*/
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  xa  = NV_DATA_DEV_OMPDEV(x);
  ya  = NV_DATA_DEV_OMPDEV(y);
  za  = NV_DATA_DEV_OMPDEV(z);

  /* scaled difference z = c*(x - y) on the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:len,c) is_device_ptr(xa, ya, za) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    za[k] = c*(xa[k]-ya[k]);
}
/* ----------------------------------------------------------------------------
* Compute vector sum z[i] = a*x[i]+y[i]
*/
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  xa  = NV_DATA_DEV_OMPDEV(x);
  ya  = NV_DATA_DEV_OMPDEV(y);
  za  = NV_DATA_DEV_OMPDEV(z);

  /* linear combination z = a*x + y on the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:len,a) is_device_ptr(xa, ya, za) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    za[k] = (a*xa[k])+ya[k];
}
/* ----------------------------------------------------------------------------
* Compute vector difference z[i] = a*x[i]-y[i]
*/
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  xa  = NV_DATA_DEV_OMPDEV(x);
  ya  = NV_DATA_DEV_OMPDEV(y);
  za  = NV_DATA_DEV_OMPDEV(z);

  /* linear combination z = a*x - y on the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:len,a) is_device_ptr(xa, ya, za) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    za[k] = (a*xa[k])-ya[k];
}
/* ----------------------------------------------------------------------------
* Compute special cases of linear sum
*/
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y)
{
  sunindextype k, len;
  realtype *xa, *ya;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  xa  = NV_DATA_DEV_OMPDEV(x);
  ya  = NV_DATA_DEV_OMPDEV(y);

  /* run on the default device */
  dev = omp_get_default_device();

  if (a == ONE) {
    /* y <- y + x : no multiply needed */
#pragma omp target map(to:len) is_device_ptr(xa, ya) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < len; k++)
      ya[k] += xa[k];
  }
  else if (a == -ONE) {
    /* y <- y - x : no multiply needed */
#pragma omp target map(to:len) is_device_ptr(xa, ya) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < len; k++)
      ya[k] -= xa[k];
  }
  else {
    /* general case: y <- y + a*x */
#pragma omp target map(to:len,a) is_device_ptr(xa, ya) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < len; k++)
      ya[k] += a*xa[k];
  }
}
/* ----------------------------------------------------------------------------
* Compute scaled vector x[i] = a*x[i]
*/
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x)
{
  sunindextype k, len;
  realtype *xa;
  int dev;

  len = NV_LENGTH_OMPDEV(x);
  xa  = NV_DATA_DEV_OMPDEV(x);

  /* in-place scale x <- a*x on the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:len,a) is_device_ptr(xa) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    xa[k] *= a;
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
/* Z[i] = X[i] + Y[i] for each of the nvec vector triples.  Returns 0. */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Gather the per-vector device data pointers on the host so a single
     map clause transfers them all to the device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: 'distribute' must be associated directly with a for-loop; the
     previous compound-statement body was non-conforming OpenMP.  The
     per-vector pointers are now loop-local, so concurrent teams no
     longer race on shared pointer variables. */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    realtype *xd_dev = xd_dev_ptrs[i];
    realtype *yd_dev = yd_dev_ptrs[i];
    realtype *zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = xd_dev[j] + yd_dev[j];
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* Z[i] = X[i] - Y[i] for each of the nvec vector triples.  Returns 0. */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Gather the per-vector device data pointers on the host so a single
     map clause transfers them all to the device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: 'distribute' must be associated directly with a for-loop; the
     previous compound-statement body was non-conforming OpenMP.  The
     per-vector pointers are now loop-local, so concurrent teams no
     longer race on shared pointer variables. */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    realtype *xd_dev = xd_dev_ptrs[i];
    realtype *yd_dev = yd_dev_ptrs[i];
    realtype *zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = xd_dev[j] - yd_dev[j];
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* Z[i] = c * (X[i] + Y[i]) for each of the nvec vector triples.  Returns 0. */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Gather the per-vector device data pointers on the host so a single
     map clause transfers them all to the device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: 'distribute' must be associated directly with a for-loop; the
     previous compound-statement body was non-conforming OpenMP.  The
     per-vector pointers are now loop-local, so concurrent teams no
     longer race on shared pointer variables.  The scalar c is mapped
     explicitly (scalars are firstprivate by default, but this keeps
     the clause style consistent with the rest of the file). */
#pragma omp target map(to:N,c,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    realtype *xd_dev = xd_dev_ptrs[i];
    realtype *yd_dev = yd_dev_ptrs[i];
    realtype *zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = c * (xd_dev[j] + yd_dev[j]);
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* Z[i] = c * (X[i] - Y[i]) for each of the nvec vector triples.  Returns 0. */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Gather the per-vector device data pointers on the host so a single
     map clause transfers them all to the device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: 'distribute' must be associated directly with a for-loop; the
     previous compound-statement body was non-conforming OpenMP.  The
     per-vector pointers are now loop-local, so concurrent teams no
     longer race on shared pointer variables. */
#pragma omp target map(to:N,c,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    realtype *xd_dev = xd_dev_ptrs[i];
    realtype *yd_dev = yd_dev_ptrs[i];
    realtype *zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = c * (xd_dev[j] - yd_dev[j]);
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* Z[i] = a * X[i] + Y[i] for each of the nvec vector triples.  Returns 0. */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Gather the per-vector device data pointers on the host so a single
     map clause transfers them all to the device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: 'distribute' must be associated directly with a for-loop; the
     previous compound-statement body was non-conforming OpenMP.  The
     per-vector pointers are now loop-local, so concurrent teams no
     longer race on shared pointer variables. */
#pragma omp target map(to:N,a,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    realtype *xd_dev = xd_dev_ptrs[i];
    realtype *yd_dev = yd_dev_ptrs[i];
    realtype *zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = (a * xd_dev[j]) + yd_dev[j];
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* Z[i] = a * X[i] - Y[i] for each of the nvec vector triples.  Returns 0. */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Gather the per-vector device data pointers on the host so a single
     map clause transfers them all to the device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: 'distribute' must be associated directly with a for-loop; the
     previous compound-statement body was non-conforming OpenMP.  The
     per-vector pointers are now loop-local, so concurrent teams no
     longer race on shared pointer variables. */
#pragma omp target map(to:N,a,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    realtype *xd_dev = xd_dev_ptrs[i];
    realtype *yd_dev = yd_dev_ptrs[i];
    realtype *zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = (a * xd_dev[j]) - yd_dev[j];
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* Y[i] <- Y[i] + a * X[i] for each of the nvec vector pairs, with
 * specialized kernels for a == 1 and a == -1.  Returns 0. */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
  int i, dev;
  sunindextype j, N;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Gather the per-vector device data pointers on the host so a single
     map clause transfers them all to the device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /* FIX (all three kernels below): 'distribute' must be associated
     directly with a for-loop; the previous compound-statement body was
     non-conforming OpenMP.  The per-vector pointers are now loop-local,
     so concurrent teams no longer race on shared pointer variables. */

  if (a == ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) device(dev)
#pragma omp teams distribute
    for (i=0; i<nvec; i++) {
      realtype *xd_dev = xd_dev_ptrs[i];
      realtype *yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        yd_dev[j] += xd_dev[j];
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

  if (a == -ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) device(dev)
#pragma omp teams distribute
    for (i=0; i<nvec; i++) {
      realtype *xd_dev = xd_dev_ptrs[i];
      realtype *yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        yd_dev[j] -= xd_dev[j];
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

#pragma omp target map(to:N,a,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    realtype *xd_dev = xd_dev_ptrs[i];
    realtype *yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      yd_dev[j] += a * xd_dev[j];
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);

  return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear every fused operation together */
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;
  v->ops->nvscaleaddmulti     = tf ? N_VScaleAddMulti_OpenMPDEV     : NULL;
  v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMPDEV      : NULL;

  /* install or clear every vector-array operation together */
  v->ops->nvlinearsumvectorarray         = tf ? N_VLinearSumVectorArray_OpenMPDEV         : NULL;
  v->ops->nvscalevectorarray             = tf ? N_VScaleVectorArray_OpenMPDEV             : NULL;
  v->ops->nvconstvectorarray             = tf ? N_VConstVectorArray_OpenMPDEV             : NULL;
  v->ops->nvwrmsnormvectorarray          = tf ? N_VWrmsNormVectorArray_OpenMPDEV          : NULL;
  v->ops->nvwrmsnormmaskvectorarray      = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV      : NULL;
  v->ops->nvscaleaddmultivectorarray     = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV     : NULL;
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the fused linear-combination operation */
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the fused scale-add-multi operation */
  v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the fused multi-dot-product operation */
  v->ops->nvdotprodmulti = tf ? N_VDotProdMulti_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the vector-array linear-sum operation */
  v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the vector-array scale operation */
  v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the vector-array const-fill operation */
  v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the vector-array WRMS-norm operation */
  v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the vector-array masked WRMS-norm operation */
  v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the vector-array scale-add-multi operation */
  v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV : NULL;

  return(0);
}
int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL || v->ops == NULL) return(-1);

  /* install or clear the vector-array linear-combination operation */
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;

  return(0);
}
|
convolution_sgemm_pack1to4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack1to4_bf16s_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 2u, 1, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 2u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 2u, 1, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
unsigned short* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
#if __ARM_NEON
vst1q_u16(tmpptr, vld1q_u16(img0));
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
#endif // __ARM_NEON
img0 += size;
tmpptr += 8;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
#if __ARM_NEON
vst1_u16(tmpptr, vld1_u16(img0));
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
#endif // __ARM_NEON
img0 += size;
tmpptr += 4;
}
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
img0 += size;
tmpptr += 1;
}
}
}
}
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
int nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p + 1);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
const unsigned short* tmpptr = tmp.channel(i / 8);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
float32x4_t _sum00 = vld1q_f32(biasptr);
float32x4_t _sum01 = vld1q_f32(biasptr);
float32x4_t _sum02 = vld1q_f32(biasptr);
float32x4_t _sum03 = vld1q_f32(biasptr);
float32x4_t _sum04 = vld1q_f32(biasptr);
float32x4_t _sum05 = vld1q_f32(biasptr);
float32x4_t _sum06 = vld1q_f32(biasptr);
float32x4_t _sum07 = vld1q_f32(biasptr);
float32x4_t _sum10 = vld1q_f32(biasptr + 4);
float32x4_t _sum11 = vld1q_f32(biasptr + 4);
float32x4_t _sum12 = vld1q_f32(biasptr + 4);
float32x4_t _sum13 = vld1q_f32(biasptr + 4);
float32x4_t _sum14 = vld1q_f32(biasptr + 4);
float32x4_t _sum15 = vld1q_f32(biasptr + 4);
float32x4_t _sum16 = vld1q_f32(biasptr + 4);
float32x4_t _sum17 = vld1q_f32(biasptr + 4);
for (int j = 0; j < nn; j++)
{
float32x4_t _val0 = vcvt_f32_bf16(vld1_u16(tmpptr));
float32x4_t _val1 = vcvt_f32_bf16(vld1_u16(tmpptr + 4));
float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0));
float32x4_t _w1 = vcvt_f32_bf16(vld1_u16(kptr0 + 4));
_sum00 = vmlaq_laneq_f32(_sum00, _w0, _val0, 0);
_sum01 = vmlaq_laneq_f32(_sum01, _w0, _val0, 1);
_sum02 = vmlaq_laneq_f32(_sum02, _w0, _val0, 2);
_sum03 = vmlaq_laneq_f32(_sum03, _w0, _val0, 3);
_sum04 = vmlaq_laneq_f32(_sum04, _w0, _val1, 0);
_sum05 = vmlaq_laneq_f32(_sum05, _w0, _val1, 1);
_sum06 = vmlaq_laneq_f32(_sum06, _w0, _val1, 2);
_sum07 = vmlaq_laneq_f32(_sum07, _w0, _val1, 3);
_sum10 = vmlaq_laneq_f32(_sum10, _w1, _val0, 0);
_sum11 = vmlaq_laneq_f32(_sum11, _w1, _val0, 1);
_sum12 = vmlaq_laneq_f32(_sum12, _w1, _val0, 2);
_sum13 = vmlaq_laneq_f32(_sum13, _w1, _val0, 3);
_sum14 = vmlaq_laneq_f32(_sum14, _w1, _val1, 0);
_sum15 = vmlaq_laneq_f32(_sum15, _w1, _val1, 1);
_sum16 = vmlaq_laneq_f32(_sum16, _w1, _val1, 2);
_sum17 = vmlaq_laneq_f32(_sum17, _w1, _val1, 3);
tmpptr += 8;
kptr0 += 8;
}
vst1_u16(outptr0, vcvt_bf16_f32(_sum00));
vst1_u16(outptr0 + 4, vcvt_bf16_f32(_sum01));
vst1_u16(outptr0 + 8, vcvt_bf16_f32(_sum02));
vst1_u16(outptr0 + 12, vcvt_bf16_f32(_sum03));
vst1_u16(outptr0 + 16, vcvt_bf16_f32(_sum04));
vst1_u16(outptr0 + 20, vcvt_bf16_f32(_sum05));
vst1_u16(outptr0 + 24, vcvt_bf16_f32(_sum06));
vst1_u16(outptr0 + 28, vcvt_bf16_f32(_sum07));
vst1_u16(outptr1, vcvt_bf16_f32(_sum10));
vst1_u16(outptr1 + 4, vcvt_bf16_f32(_sum11));
vst1_u16(outptr1 + 8, vcvt_bf16_f32(_sum12));
vst1_u16(outptr1 + 12, vcvt_bf16_f32(_sum13));
vst1_u16(outptr1 + 16, vcvt_bf16_f32(_sum14));
vst1_u16(outptr1 + 20, vcvt_bf16_f32(_sum15));
vst1_u16(outptr1 + 24, vcvt_bf16_f32(_sum16));
vst1_u16(outptr1 + 28, vcvt_bf16_f32(_sum17));
outptr0 += 32;
outptr1 += 32;
}
for (; i + 3 < size; i += 4)
{
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
float32x4_t _sum00 = vld1q_f32(biasptr);
float32x4_t _sum01 = vld1q_f32(biasptr);
float32x4_t _sum02 = vld1q_f32(biasptr);
float32x4_t _sum03 = vld1q_f32(biasptr);
float32x4_t _sum10 = vld1q_f32(biasptr + 4);
float32x4_t _sum11 = vld1q_f32(biasptr + 4);
float32x4_t _sum12 = vld1q_f32(biasptr + 4);
float32x4_t _sum13 = vld1q_f32(biasptr + 4);
for (int j = 0; j < nn; j++)
{
float32x4_t _val = vcvt_f32_bf16(vld1_u16(tmpptr));
float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0));
float32x4_t _w1 = vcvt_f32_bf16(vld1_u16(kptr0 + 4));
_sum00 = vmlaq_laneq_f32(_sum00, _w0, _val, 0);
_sum01 = vmlaq_laneq_f32(_sum01, _w0, _val, 1);
_sum02 = vmlaq_laneq_f32(_sum02, _w0, _val, 2);
_sum03 = vmlaq_laneq_f32(_sum03, _w0, _val, 3);
_sum10 = vmlaq_laneq_f32(_sum10, _w1, _val, 0);
_sum11 = vmlaq_laneq_f32(_sum11, _w1, _val, 1);
_sum12 = vmlaq_laneq_f32(_sum12, _w1, _val, 2);
_sum13 = vmlaq_laneq_f32(_sum13, _w1, _val, 3);
tmpptr += 4;
kptr0 += 8;
}
vst1_u16(outptr0, vcvt_bf16_f32(_sum00));
vst1_u16(outptr0 + 4, vcvt_bf16_f32(_sum01));
vst1_u16(outptr0 + 8, vcvt_bf16_f32(_sum02));
vst1_u16(outptr0 + 12, vcvt_bf16_f32(_sum03));
vst1_u16(outptr1, vcvt_bf16_f32(_sum10));
vst1_u16(outptr1 + 4, vcvt_bf16_f32(_sum11));
vst1_u16(outptr1 + 8, vcvt_bf16_f32(_sum12));
vst1_u16(outptr1 + 12, vcvt_bf16_f32(_sum13));
outptr0 += 16;
outptr1 += 16;
}
for (; i < size; i++)
{
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
float32x4_t _sum0 = vld1q_f32(biasptr);
float32x4_t _sum1 = vld1q_f32(biasptr + 4);
for (int j = 0; j < nn; j++)
{
float32x4_t _val = vdupq_n_f32(bfloat16_to_float32(tmpptr[0]));
float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0));
float32x4_t _w1 = vcvt_f32_bf16(vld1_u16(kptr0 + 4));
_sum0 = vmlaq_f32(_sum0, _val, _w0);
_sum1 = vmlaq_f32(_sum1, _val, _w1);
tmpptr += 1;
kptr0 += 8;
}
vst1_u16(outptr0, vcvt_bf16_f32(_sum0));
vst1_u16(outptr1, vcvt_bf16_f32(_sum1));
outptr0 += 4;
outptr1 += 4;
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
unsigned short* outptr0 = top_blob.channel(p);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
const unsigned short* tmpptr = tmp.channel(i / 8);
#if __ARM_NEON && __aarch64__
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __ARM_NEON
float32x4_t _sum0 = vld1q_f32(biasptr);
float32x4_t _sum1 = vld1q_f32(biasptr);
float32x4_t _sum2 = vld1q_f32(biasptr);
float32x4_t _sum3 = vld1q_f32(biasptr);
float32x4_t _sum4 = vld1q_f32(biasptr);
float32x4_t _sum5 = vld1q_f32(biasptr);
float32x4_t _sum6 = vld1q_f32(biasptr);
float32x4_t _sum7 = vld1q_f32(biasptr);
for (int j = 0; j < nn; j++)
{
float32x4_t _val0 = vcvt_f32_bf16(vld1_u16(tmpptr));
float32x4_t _val1 = vcvt_f32_bf16(vld1_u16(tmpptr + 4));
float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0));
#if __aarch64__
_sum0 = vmlaq_laneq_f32(_sum0, _w0, _val0, 0);
_sum1 = vmlaq_laneq_f32(_sum1, _w0, _val0, 1);
_sum2 = vmlaq_laneq_f32(_sum2, _w0, _val0, 2);
_sum3 = vmlaq_laneq_f32(_sum3, _w0, _val0, 3);
_sum4 = vmlaq_laneq_f32(_sum4, _w0, _val1, 0);
_sum5 = vmlaq_laneq_f32(_sum5, _w0, _val1, 1);
_sum6 = vmlaq_laneq_f32(_sum6, _w0, _val1, 2);
_sum7 = vmlaq_laneq_f32(_sum7, _w0, _val1, 3);
#else
_sum0 = vmlaq_lane_f32(_sum0, _w0, vget_low_f32(_val0), 0);
_sum1 = vmlaq_lane_f32(_sum1, _w0, vget_low_f32(_val0), 1);
_sum2 = vmlaq_lane_f32(_sum2, _w0, vget_high_f32(_val0), 0);
_sum3 = vmlaq_lane_f32(_sum3, _w0, vget_high_f32(_val0), 1);
_sum4 = vmlaq_lane_f32(_sum4, _w0, vget_low_f32(_val1), 0);
_sum5 = vmlaq_lane_f32(_sum5, _w0, vget_low_f32(_val1), 1);
_sum6 = vmlaq_lane_f32(_sum6, _w0, vget_high_f32(_val1), 0);
_sum7 = vmlaq_lane_f32(_sum7, _w0, vget_high_f32(_val1), 1);
#endif
tmpptr += 8;
kptr0 += 4;
}
vst1_u16(outptr0, vcvt_bf16_f32(_sum0));
vst1_u16(outptr0 + 4, vcvt_bf16_f32(_sum1));
vst1_u16(outptr0 + 8, vcvt_bf16_f32(_sum2));
vst1_u16(outptr0 + 12, vcvt_bf16_f32(_sum3));
vst1_u16(outptr0 + 16, vcvt_bf16_f32(_sum4));
vst1_u16(outptr0 + 20, vcvt_bf16_f32(_sum5));
vst1_u16(outptr0 + 24, vcvt_bf16_f32(_sum6));
vst1_u16(outptr0 + 28, vcvt_bf16_f32(_sum7));
outptr0 += 32;
#else
float sum0_0 = biasptr[0];
float sum0_1 = biasptr[0];
float sum0_2 = biasptr[0];
float sum0_3 = biasptr[0];
float sum0_4 = biasptr[0];
float sum0_5 = biasptr[0];
float sum0_6 = biasptr[0];
float sum0_7 = biasptr[0];
float sum1_0 = biasptr[1];
float sum1_1 = biasptr[1];
float sum1_2 = biasptr[1];
float sum1_3 = biasptr[1];
float sum1_4 = biasptr[1];
float sum1_5 = biasptr[1];
float sum1_6 = biasptr[1];
float sum1_7 = biasptr[1];
float sum2_0 = biasptr[2];
float sum2_1 = biasptr[2];
float sum2_2 = biasptr[2];
float sum2_3 = biasptr[2];
float sum2_4 = biasptr[2];
float sum2_5 = biasptr[2];
float sum2_6 = biasptr[2];
float sum2_7 = biasptr[2];
float sum3_0 = biasptr[3];
float sum3_1 = biasptr[3];
float sum3_2 = biasptr[3];
float sum3_3 = biasptr[3];
float sum3_4 = biasptr[3];
float sum3_5 = biasptr[3];
float sum3_6 = biasptr[3];
float sum3_7 = biasptr[3];
for (int q = 0; q < nn; q++)
{
sum0_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]);
sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]);
sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]);
sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]);
sum0_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[0]);
sum0_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[0]);
sum0_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[0]);
sum0_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[0]);
sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]);
sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[1]);
sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[1]);
sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[1]);
sum1_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[1]);
sum1_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[1]);
sum1_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[1]);
sum1_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[1]);
sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]);
sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[2]);
sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[2]);
sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[2]);
sum2_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[2]);
sum2_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[2]);
sum2_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[2]);
sum2_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[2]);
sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]);
sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[3]);
sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[3]);
sum3_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[3]);
sum3_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[3]);
sum3_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[3]);
sum3_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[3]);
sum3_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[3]);
tmpptr += 8;
kptr += 4;
}
outptr0[0] = float32_to_bfloat16(sum0_0);
outptr0[1] = float32_to_bfloat16(sum1_0);
outptr0[2] = float32_to_bfloat16(sum2_0);
outptr0[3] = float32_to_bfloat16(sum3_0);
outptr0[4] = float32_to_bfloat16(sum0_1);
outptr0[5] = float32_to_bfloat16(sum1_1);
outptr0[6] = float32_to_bfloat16(sum2_1);
outptr0[7] = float32_to_bfloat16(sum3_1);
outptr0[8] = float32_to_bfloat16(sum0_2);
outptr0[9] = float32_to_bfloat16(sum1_2);
outptr0[10] = float32_to_bfloat16(sum2_2);
outptr0[11] = float32_to_bfloat16(sum3_2);
outptr0[12] = float32_to_bfloat16(sum0_3);
outptr0[13] = float32_to_bfloat16(sum1_3);
outptr0[14] = float32_to_bfloat16(sum2_3);
outptr0[15] = float32_to_bfloat16(sum3_3);
outptr0[16] = float32_to_bfloat16(sum0_4);
outptr0[17] = float32_to_bfloat16(sum1_4);
outptr0[18] = float32_to_bfloat16(sum2_4);
outptr0[19] = float32_to_bfloat16(sum3_4);
outptr0[20] = float32_to_bfloat16(sum0_5);
outptr0[21] = float32_to_bfloat16(sum1_5);
outptr0[22] = float32_to_bfloat16(sum2_5);
outptr0[23] = float32_to_bfloat16(sum3_5);
outptr0[24] = float32_to_bfloat16(sum0_6);
outptr0[25] = float32_to_bfloat16(sum1_6);
outptr0[26] = float32_to_bfloat16(sum2_6);
outptr0[27] = float32_to_bfloat16(sum3_6);
outptr0[28] = float32_to_bfloat16(sum0_7);
outptr0[29] = float32_to_bfloat16(sum1_7);
outptr0[30] = float32_to_bfloat16(sum2_7);
outptr0[31] = float32_to_bfloat16(sum3_7);
outptr0 += 32;
#endif // __ARM_NEON
}
for (; i + 3 < size; i += 4)
{
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __ARM_NEON && __aarch64__
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __ARM_NEON
float32x4_t _sum0 = vld1q_f32(biasptr);
float32x4_t _sum1 = vld1q_f32(biasptr);
float32x4_t _sum2 = vld1q_f32(biasptr);
float32x4_t _sum3 = vld1q_f32(biasptr);
for (int j = 0; j < nn; j++)
{
float32x4_t _val = vcvt_f32_bf16(vld1_u16(tmpptr));
float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0));
#if __aarch64__
_sum0 = vmlaq_laneq_f32(_sum0, _w0, _val, 0);
_sum1 = vmlaq_laneq_f32(_sum1, _w0, _val, 1);
_sum2 = vmlaq_laneq_f32(_sum2, _w0, _val, 2);
_sum3 = vmlaq_laneq_f32(_sum3, _w0, _val, 3);
#else
_sum0 = vmlaq_lane_f32(_sum0, _w0, vget_low_f32(_val), 0);
_sum1 = vmlaq_lane_f32(_sum1, _w0, vget_low_f32(_val), 1);
_sum2 = vmlaq_lane_f32(_sum2, _w0, vget_high_f32(_val), 0);
_sum3 = vmlaq_lane_f32(_sum3, _w0, vget_high_f32(_val), 1);
#endif
tmpptr += 4;
kptr0 += 4;
}
vst1_u16(outptr0, vcvt_bf16_f32(_sum0));
vst1_u16(outptr0 + 4, vcvt_bf16_f32(_sum1));
vst1_u16(outptr0 + 8, vcvt_bf16_f32(_sum2));
vst1_u16(outptr0 + 12, vcvt_bf16_f32(_sum3));
outptr0 += 16;
#else
float sum0_0 = biasptr[0];
float sum0_1 = biasptr[0];
float sum0_2 = biasptr[0];
float sum0_3 = biasptr[0];
float sum1_0 = biasptr[1];
float sum1_1 = biasptr[1];
float sum1_2 = biasptr[1];
float sum1_3 = biasptr[1];
float sum2_0 = biasptr[2];
float sum2_1 = biasptr[2];
float sum2_2 = biasptr[2];
float sum2_3 = biasptr[2];
float sum3_0 = biasptr[3];
float sum3_1 = biasptr[3];
float sum3_2 = biasptr[3];
float sum3_3 = biasptr[3];
for (int q = 0; q < nn; q++)
{
sum0_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]);
sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]);
sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]);
sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]);
sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]);
sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[1]);
sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[1]);
sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[1]);
sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]);
sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[2]);
sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[2]);
sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[2]);
sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]);
sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[3]);
sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[3]);
sum3_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[3]);
tmpptr += 4;
kptr += 4;
}
outptr0[0] = float32_to_bfloat16(sum0_0);
outptr0[1] = float32_to_bfloat16(sum1_0);
outptr0[2] = float32_to_bfloat16(sum2_0);
outptr0[3] = float32_to_bfloat16(sum3_0);
outptr0[4] = float32_to_bfloat16(sum0_1);
outptr0[5] = float32_to_bfloat16(sum1_1);
outptr0[6] = float32_to_bfloat16(sum2_1);
outptr0[7] = float32_to_bfloat16(sum3_1);
outptr0[8] = float32_to_bfloat16(sum0_2);
outptr0[9] = float32_to_bfloat16(sum1_2);
outptr0[10] = float32_to_bfloat16(sum2_2);
outptr0[11] = float32_to_bfloat16(sum3_2);
outptr0[12] = float32_to_bfloat16(sum0_3);
outptr0[13] = float32_to_bfloat16(sum1_3);
outptr0[14] = float32_to_bfloat16(sum2_3);
outptr0[15] = float32_to_bfloat16(sum3_3);
outptr0 += 16;
#endif // __ARM_NEON
}
for (; i < size; i++)
{
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __ARM_NEON && __aarch64__
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __ARM_NEON
float32x4_t _sum = vld1q_f32(biasptr);
for (int j = 0; j < nn; j++)
{
float32x4_t _val = vdupq_n_f32(bfloat16_to_float32(tmpptr[0]));
float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0));
_sum = vmlaq_f32(_sum, _val, _w0);
tmpptr += 1;
kptr0 += 4;
}
vst1_u16(outptr0, vcvt_bf16_f32(_sum));
outptr0 += 4;
#else
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
for (int q = 0; q < nn; q++)
{
sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]);
sum1 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]);
sum2 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]);
sum3 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]);
tmpptr++;
kptr += 4;
}
outptr0[0] = float32_to_bfloat16(sum0);
outptr0[1] = float32_to_bfloat16(sum1);
outptr0[2] = float32_to_bfloat16(sum2);
outptr0[3] = float32_to_bfloat16(sum3);
outptr0 += 4;
#endif // __ARM_NEON
}
}
}
// Repack fp32 convolution weights into the bf16 kernel layout consumed by
// im2col_sgemm_pack1to4_bf16s_neon (pack1 input -> pack4 output).
// src = maxk-inch-outch (fp32)
// dst: elements stored as bf16 (2 bytes each); on aarch64 the output
// channels are interleaved 8 at a time with a 4-wide tail, elsewhere
// 4 at a time only.
// NOTE(review): the "dst = 4b-4a-maxk-inch/4a-outch/4b" comment below looks
// inherited from the pack4 variant -- inch is not blocked here; confirm.
static void convolution_im2col_sgemm_transform_kernel_pack1to4_bf16s_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 4b-4a-maxk-inch/4a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __ARM_NEON && __aarch64__
    // one kernel_tm channel holds 8 interleaved output channels (tail: 4)
    kernel_tm.create(8 * maxk, inch, outch / 8 + (outch % 8) / 4, (size_t)2u);
#else
    kernel_tm.create(4 * maxk, inch, outch / 4, (size_t)2u);
#endif

    int q = 0;
#if __ARM_NEON && __aarch64__
    // pack output channels 8 at a time
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
        const Mat k4 = kernel.channel(q + 4);
        const Mat k5 = kernel.channel(q + 5);
        const Mat k6 = kernel.channel(q + 6);
        const Mat k7 = kernel.channel(q + 7);

        unsigned short* g00 = kernel_tm.channel(q / 8);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);
            const float* k40 = k4.row(p);
            const float* k50 = k5.row(p);
            const float* k60 = k6.row(p);
            const float* k70 = k7.row(p);

            for (int k = 0; k < maxk; k++)
            {
                // interleave 8 output channels per kernel tap, fp32 -> bf16
                g00[0] = float32_to_bfloat16(k00[k]);
                g00[1] = float32_to_bfloat16(k10[k]);
                g00[2] = float32_to_bfloat16(k20[k]);
                g00[3] = float32_to_bfloat16(k30[k]);
                g00[4] = float32_to_bfloat16(k40[k]);
                g00[5] = float32_to_bfloat16(k50[k]);
                g00[6] = float32_to_bfloat16(k60[k]);
                g00[7] = float32_to_bfloat16(k70[k]);
                g00 += 8;
            }
        }
    }
#endif // __ARM_NEON && __aarch64__
    // pack the remaining output channels 4 at a time
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
#if __ARM_NEON && __aarch64__
        // tail channels live after the 8-packed ones
        unsigned short* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);
#else
        unsigned short* g00 = kernel_tm.channel(q / 4);
#endif
        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);

            for (int k = 0; k < maxk; k++)
            {
                // interleave 4 output channels per kernel tap, fp32 -> bf16
                g00[0] = float32_to_bfloat16(k00[k]);
                g00[1] = float32_to_bfloat16(k10[k]);
                g00[2] = float32_to_bfloat16(k20[k]);
                g00[3] = float32_to_bfloat16(k30[k]);
                g00 += 4;
            }
        }
    }
}
// Convolution via im2col + sgemm for the pack1to4 bf16 path:
// unfold the input into a (size x maxk x inch) matrix of bf16 values,
// then hand it to the packed sgemm kernel.
static void convolution_im2col_sgemm_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col buffer: one row of `size` bf16 elements per (channel, kernel tap)
    Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);
    {
        // step from the end of one sampled output row to the start of the next
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img = bottom_blob.channel(q);
            unsigned short* ptr = bottom_im2col.channel(q);

            for (int ky = 0; ky < kernel_h; ky++)
            {
                for (int kx = 0; kx < kernel_w; kx++)
                {
                    // top-left sample position for this kernel tap
                    const unsigned short* sptr = img.row<const unsigned short>(dilation_h * ky) + dilation_w * kx;

                    for (int y = 0; y < outh; y++)
                    {
                        for (int x = 0; x < outw; x++)
                        {
                            *ptr++ = *sptr;
                            sptr += stride_w;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack1to4_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
test.c |
#include <stdio.h>
#include <stdlib.h> /* malloc/free used by the collapse tests */
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
/* Verify that every element of A equals TRIALS.
 * Returns 1 on success, 0 (after printing the offender) on first mismatch. */
int check_results(double* A){
    int i;
    for (i = 0; i < N; i++) {
        if (A[i] == (double) TRIALS)
            continue;
        printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
        return 0;
    }
    return 1;
}
/*
 * Verify the privatization results: every A[i] must equal TRIALS*3 and every
 * B[i] must equal TRIALS*7 (p=3 and q=7 are accumulated once per trial).
 * Returns 1 on success, 0 (after printing the offender) on first mismatch.
 */
int check_results_priv(double *A, double *B){
    for(int i = 0 ; i < N ; i++) {
        if (A[i] != TRIALS*3) {
            /* fixed: report the value the check expects (was TRIALS*2) */
            printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, A[i]);
            return 0;
        }
        if (B[i] != TRIALS*7) {
            /* fixed: report the value the check expects (was TRIALS*3) */
            printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*7, B[i]);
            return 0;
        }
    }
    return 1;
}
/*
 * CODE(): zero A, run the "target teams distribute simd" kernel
 * A[i] += C[i] for TRIALS iterations under the clauses currently bound to
 * CLAUSES, then verify A[i] == TRIALS for every element; prints "Succeeded"
 * when the number of passing checks equals `expected`.
 */
#define CODE() \
ZERO(A); \
success = 0; \
for (int t = 0 ; t < TRIALS ; t++) { \
_Pragma("omp target teams distribute simd CLAUSES") \
for (int i = 0 ; i < N ; i++){ \
A[i] += C[i]; \
} \
} \
success += check_results(A); \
if (success == expected) \
printf("Succeeded\n");
/*
 * CODE_PRIV(): like CODE(), but also exercises privatized scalars p and q.
 * Each trial sets p=3 / q=7 inside the loop and accumulates them into A / B,
 * then verifies A[i] == TRIALS*3 and B[i] == TRIALS*7 via check_results_priv.
 */
#define CODE_PRIV() \
ZERO(A); \
ZERO(B); \
p = 2.0; \
q = 4.0; \
success = 0; \
for (int t = 0 ; t < TRIALS ; t++) { \
_Pragma("omp target teams distribute simd CLAUSES") \
for (int i = 0 ; i < N ; i++){ \
p = 3; \
q = 7; \
A[i] += p; \
B[i] += q; \
} \
} \
success += check_results_priv(A, B); \
if (success == expected) \
printf("Succeeded\n");
/*
 * Driver: exercises "omp target teams distribute simd" with various
 * num_teams / dist_schedule / collapse combinations, printing
 * Succeeded/Failed per test case.
 *
 * Fixes vs. original: the second blocking-loop test clamped its last block
 * to 512 instead of the 510 loop bound; three error messages printed a
 * stale expected value; the collapse-test buffers were leaked.
 */
int main(void) {
    check_offloading();

    double A[N], B[N], C[N], D[N], E[N];
    int fail = 0;
    int expected = 1;
    int success = 0;
    int chunkSize;
    double p = 2.0, q = 4.0; // used by the (disabled) private/firstprivate tests
    int nte, tl, blockSize;

    INIT(); // C[i]=1, D[i]=i, E[i]=-i

    // **************************
    // Series 1: no dist_schedule
    // **************************

    //
    // Test: #iterations == #teams
    //
    printf("iterations = teams\n");
#define CLAUSES num_teams(992)
    CODE()
#undef CLAUSES

    printf("iterations > teams\n");
#define CLAUSES num_teams(256)
    CODE()
#undef CLAUSES

    printf("iterations < teams\n");
#define CLAUSES num_teams(1024)
    CODE()
#undef CLAUSES

    printf("num_teams(512) dist_schedule(static,1)\n");
#define CLAUSES num_teams(512) dist_schedule(static, 1)
    CODE()
#undef CLAUSES

    printf("num_teams(512) dist_schedule(static,512)\n");
#define CLAUSES num_teams(512) dist_schedule(static, 512)
    CODE()
#undef CLAUSES

    printf("num_teams(512) dist_schedule(static, chunkSize)\n");
    chunkSize = N / 10;
#define CLAUSES num_teams(512) dist_schedule(static, chunkSize)
    CODE()
#undef CLAUSES

    printf("num_teams(1024) dist_schedule(static, chunkSize)\n");
    chunkSize = N / 10;
#define CLAUSES num_teams(1024) dist_schedule(static, chunkSize)
    CODE()
#undef CLAUSES

    printf("num_teams(1024) dist_schedule(static, 1)\n");
#define CLAUSES num_teams(1024) dist_schedule(static, 1)
    CODE()
#undef CLAUSES

    printf("num_teams(3) dist_schedule(static, 1)\n");
#define CLAUSES num_teams(3) dist_schedule(static, 1)
    CODE()
#undef CLAUSES

    printf("num_teams(3) dist_schedule(static, 3)\n");
#define CLAUSES num_teams(3) dist_schedule(static, 3)
    CODE()
#undef CLAUSES

    printf("num_teams(10) dist_schedule(static, 99)\n");
#define CLAUSES num_teams(10) dist_schedule(static, 99)
    CODE()
#undef CLAUSES

    printf("num_teams(256) dist_schedule(static, 992)\n");
#define CLAUSES num_teams(256) dist_schedule(static, 992)
    CODE()
#undef CLAUSES

#if 0
    printf("num_teams(256) private(p,q)\n");
#define CLAUSES num_teams(256) private(p,q)
    CODE_PRIV()
#undef CLAUSES
#endif

    //
    // Test: firstprivate
    //
#if 0
    printf("num_teams(64) firstprivate(p, q)\n");
    ZERO(A); ZERO(B);
    p = 2.0, q = 4.0;
    for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation
#pragma omp teams distribute num_teams(64) firstprivate(p, q)
        for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team
            p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team)
            q += 7.0;
            A[i] += p;
            B[i] += q;
        }
    }
    for(int i = 0 ; i < 128 ; i++) {
        if (i % 2 == 0) {
            if (A[i] != (2.0+3.0)*TRIALS) {
                printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
                fail = 1;
            }
            if (B[i] != (4.0+7.0)*TRIALS) {
                printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]);
                fail = 1;
            }
        } else {
            if (A[i] != (2.0+3.0*2)*TRIALS) {
                printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]);
                fail = 1;
            }
            if (B[i] != (4.0+7.0*2)*TRIALS) {
                printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]);
                fail = 1;
            }
        }
    }
    if(fail) printf("Failed\n");
    else printf("Succeeded\n");
#endif

    //
    // Test: lastprivate
    //
#if 0
    printf("num_teams(10) lastprivate(lastpriv)\n");
    success = 0;
    int lastpriv = -1;
#pragma omp target data map(tofrom:lastpriv)
#pragma omp target teams distribute simd num_teams(10) lastprivate(lastpriv)
    for(int i = 0 ; i < omp_get_num_teams() ; i++)
        lastpriv = omp_get_team_num();
    if(lastpriv != 9) {
        printf("lastpriv value is %d and should have been %d\n", lastpriv, 9);
        fail = 1;
    }
    if(fail) printf("Failed\n");
    else printf("Succeeded\n");
#endif

    // // ***************************
    // // Series 4: with parallel for
    // // ***************************

    //
    // Test: simple blocking loop
    //
    printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n");
    success = 0;
    ZERO(A); ZERO(B);
    nte = 32;
    tl = 64;
    blockSize = tl;
    for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute simd num_teams(nte) thread_limit(tl)
        for(int j = 0 ; j < 256 ; j += blockSize) {
            for(int i = j ; i < j+blockSize; i++) {
                A[i] += B[i] + C[i];
            }
        }
    }
    for(int i = 0 ; i < 256 ; i++) {
        if (A[i] != TRIALS) {
            // fixed: report the value the check expects (was (2.0+3.0)*TRIALS)
            printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
            fail = 1;
        }
    }
    if(fail) printf("Failed\n");
    else printf("Succeeded\n");

    //
    // Test: blocking loop where upper bound is not a multiple of tl*nte
    //
    printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n");
    success = 0;
    ZERO(A); ZERO(B);
    nte = 32;
    tl = 64;
    blockSize = tl;
    for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute simd num_teams(nte) thread_limit(tl)
        for(int j = 0 ; j < 510 ; j += blockSize) {
            // fixed: clamp the last block to the 510 bound (was 512, which
            // overshot the intended range by two elements)
            int ub = (j+blockSize < 510) ? (j+blockSize) : 510;
            for(int i = j ; i < ub; i++) {
                A[i] += B[i] + C[i];
            }
        }
    }
    for(int i = 0 ; i < 256 ; i++) {
        if (A[i] != TRIALS) {
            // fixed: report the value the check expects (was (2.0+3.0)*TRIALS)
            printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
            fail = 1;
        }
    }
    if(fail) printf("Failed\n");
    else printf("Succeeded\n");

    // **************************
    // Series 5: collapse
    // **************************

    //
    // Test: 2 loops
    //
    printf("num_teams(512) collapse(2)\n");
    success = 0;
    double * S = (double *) malloc(N*N*sizeof(double));
    double * T = (double *) malloc(N*N*sizeof(double));
    double * U = (double *) malloc(N*N*sizeof(double));
    for (int i = 0 ; i < N ; i++)
        for (int j = 0 ; j < N ; j++)
        {
            S[i*N+j] = 0.0;
            T[i*N+j] = 1.0;
            U[i*N+j] = 2.0;
        }
    for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target data map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N])
#pragma omp target teams distribute simd num_teams(512) collapse(2)
        for (int i = 0 ; i < N ; i++)
            for (int j = 0 ; j < N ; j++)
                S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t
    }
    for (int i = 0 ; i < N ; i++)
        for (int j = 0 ; j < N ; j++)
            if (S[i*N+j] != TRIALS*3.0) {
                printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]);
                fail = 1;
            }
    if(fail) printf("Failed\n");
    else printf("Succeeded\n");

    //
    // Test: 3 loops
    //
    printf("num_teams(512) collapse(3)\n");
    success = 0;
    int M = N/8;
    double * V = (double *) malloc(M*M*M*sizeof(double));
    double * Z = (double *) malloc(M*M*M*sizeof(double));
    for (int i = 0 ; i < M ; i++)
        for (int j = 0 ; j < M ; j++)
            for (int k = 0 ; k < M ; k++)
            {
                V[i*M*M+j*M+k] = 2.0;
                Z[i*M*M+j*M+k] = 3.0;
            }
    for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target data map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M])
#pragma omp target teams distribute simd num_teams(512) collapse(3)
        for (int i = 0 ; i < M ; i++)
            for (int j = 0 ; j < M ; j++)
                for (int k = 0 ; k < M ; k++)
                    V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t
    }
    for (int i = 0 ; i < M ; i++)
        for (int j = 0 ; j < M ; j++)
            for (int k = 0 ; k < M ; k++)
                if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) {
                    // fixed: report the value the check expects (was TRIALS*3.0)
                    printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) (2.0+TRIALS*3.0), V[i*M*M+j*M+k]);
                    fail = 1;
                }
    if(fail) printf("Failed\n");
    else printf("Succeeded\n");

    // release the collapse-test buffers (previously leaked)
    free(S);
    free(T);
    free(U);
    free(V);
    free(Z);

    // NOTE(review): the exit code is 0 even when tests fail; the harness
    // appears to key off the printed Succeeded/Failed lines -- confirm
    // before changing this to `return fail;`.
    return 0;
}
|
SE_fg_int_extended_split_mex.c | #include "mex.h"
#include "../SE_fgg.h"
void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);
#define X prhs[0] // this arg is unused
#define HH prhs[1]
#define OPT prhs[2]
#define ZS prhs[3]
#define ZX prhs[4]
#define ZY prhs[5]
#define ZZ prhs[6]
#define IDX prhs[7]
#define PHI_OUT plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
// MEX gateway for the Spectral Ewald fast Gaussian gridding (FGG)
// integration step, extended split version.
//
// prhs: X (unused), HH (grid data), OPT (parameter struct), ZS/ZX/ZY/ZZ
//       (precomputed Gaussian factors), IDX (precomputed indices).
// plhs: PHI_OUT, an N-by-1 real vector (N = rows of IDX).
//
// Fix vs. original: five call sites contained the mojibake "¶ms"
// (an HTML-entity corruption of "&params"), which does not compile;
// restored to &params.
void mexFunction(int nlhs, mxArray *plhs[],
                 int nrhs, const mxArray *prhs[] )
{
    const int N = mxGetM(IDX); // one target per row of IDX

    SE_FGG_params params;
    SE_FGG_MEX_params(&params, OPT, N);

    // attach pre-computed quantities
    SE_FGG_work work;
    work.zs = mxGetPr(ZS);
    work.zx = mxGetPr(ZX);
    work.zy = mxGetPr(ZY);
    work.zz = mxGetPr(ZZ);
    work.idx = (int*)mxGetData(IDX);
    work.H = mxGetPr(HH);

    // output vector
    PHI_OUT = mxCreateDoubleMatrix(N,1,mxREAL);
    double* phi = mxGetPr(PHI_OUT);

    if(VERBOSE)
        mexPrintf("[SE%s FG(i)] N=%d, P=%d\n",PER_STR,N,params.P);

    if(N==1)
    {
        // Don't thread for single target
#ifdef __AVX__
        SE_FGG_int_split_AVX_dispatch(phi, &work, &params);
#else
        SE_FGG_int_split_SSE_dispatch(phi, &work, &params);
#endif
    }
    else
    {
        // NOTE(review): the dispatch routines are assumed to divide the
        // targets among the threads of this parallel region internally
        // (all threads receive the same phi/work/params) -- confirm.
#ifdef _OPENMP
#pragma omp parallel default(shared)
#endif
        {
            // now do the work
#ifdef __AVX__
            SE_FGG_int_split_AVX_dispatch(phi, &work, &params);
#else
            SE_FGG_int_split_SSE_dispatch(phi, &work, &params);
#endif
        }
    }
}
|
GB_binop__ge_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_uint8
// A.*B function (eWiseMult): GB_AemultB__ge_uint8
// A*D function (colscale): GB_AxD__ge_uint8
// D*A function (rowscale): GB_DxB__ge_uint8
// C+=B function (dense accum): GB_Cdense_accumB__ge_uint8
// C+=b function (dense accum): GB_Cdense_accumb__ge_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_uint8
// C=scalar+B GB_bind1st__ge_uint8
// C=scalar+B' GB_bind1st_tran__ge_uint8
// C=A+scalar GB_bind2nd__ge_uint8
// C=A'+scalar GB_bind2nd_tran__ge_uint8
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT8 || GxB_NO_GE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; cij = (aij >= bij).
// Auto-generated wrapper: the loop body lives in the included template,
// specialized via the GB_* macros defined at the top of this file.
GrB_Info GB_Cdense_ewise3_noaccum__ge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// NOTE(review): the template body is compiled out below (#if 0) --
// presumably GE is not a valid accum operator for this kernel, so the
// stub just reports GrB_SUCCESS; confirm against the code generator.
GrB_Info GB_Cdense_accumB__ge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
#if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
#endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// NOTE(review): as with GB_Cdense_accumB above, the template body is
// compiled out (#if 0) and the stub reports GrB_SUCCESS unconditionally.
GrB_Info GB_Cdense_accumb__ge_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
#if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
#endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale a sparse matrix A by a diagonal matrix D,
// applying cij = (aij >= djj).  The kfirst/klast/pstart slice arrays
// partition A's entries across ntasks tasks.
GrB_Info GB_AxD__ge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // only the values of C are computed here; C shares A's pattern
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale a sparse matrix B by a diagonal matrix D,
// applying cij = (dii >= bij).
GrB_Info GB_DxB__ge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // only the values of C are computed here; C shares B's pattern
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and B,
// with cij = (aij >= bij) where both entries are present.
// TaskList/C_to_* arrays are the precomputed parallel schedule.
GrB_Info GB_AaddB__ge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns
// of A and B, with cij = (aij >= bij).
GrB_Info GB_AemultB__ge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x >= Bx [k]) for all k: apply GE with the scalar bound
// to the first operand.
GrB_Info GB_bind1st__ge_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    bool *Cx = (bool *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (x >= Bx [k]) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] >= y) for all k: apply GE with the scalar bound
// to the second operand.
GrB_Info GB_bind2nd__ge_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t y = (*((uint8_t *) y_input)) ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    bool *Cx = (bool *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (Ax [k] >= y) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes
// Cx [pC] = (x >= Ax [pA]), with x the bound scalar (no typecasting).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}

// C = op (x, A'): transpose A and apply GE with x bound as the first
// operand.  The transpose loop comes from GB_unop_transpose.c, which uses
// the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__ge_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): generated boilerplate -- re-establishes GB_ATYPE after the
// template include; it has no runtime effect.
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes
// Cx [pC] = (Ax [pA] >= y), with y the bound scalar (no typecasting).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}

// C = op (A', y): transpose A and apply GE with y bound as the second
// operand.  The transpose loop comes from GB_unop_transpose.c, which uses
// the GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__ge_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
smul_glv4.c | //#define DEBUG_MODE
#ifdef DEBUG_MODE
#include <stdio.h>
#include "kernel.h"
#else
#include "_core.h"
#endif
#include "multiprecision.h"
#include "multiprecision_stack.h"
#include "finite128.h"
/* phi: apply the precomputed GLV endomorphism to a divisor, D3 := phi(D1).
 * The Mumford coordinates Q,R,S,T are each scaled by the matching constant
 * u1z,u0z,v1z,v0z from cn, while the projective coordinates Z, W, V are
 * copied unchanged.  NOTE(review): this assumes the endomorphism acts
 * diagonally in these coordinates -- determined by how the cn constants were
 * generated; confirm against the parameter generator. */
static inline void phi(DIV_hec_fp_2e128mc D3, const DIV_hec_fp_2e128mc D1, const CNS_hec_fp_2e128mc_glv4 cn){
fp_mul_2e128mc_x8664_asm(D3->Q, cn->prm, NULL, D1->Q, cn->u1z);
fp_mul_2e128mc_x8664_asm(D3->R, cn->prm, NULL, D1->R, cn->u0z);
fp_mul_2e128mc_x8664_asm(D3->S, cn->prm, NULL, D1->S, cn->v1z);
fp_mul_2e128mc_x8664_asm(D3->T, cn->prm, NULL, D1->T, cn->v0z);
fp_cpy_2e128mc_x8664_asm(D3->Z, cn->prm, NULL, D1->Z);
fp_cpy_2e128mc_x8664_asm(D3->W, cn->prm, NULL, D1->W);
fp_cpy_2e128mc_x8664_asm(D3->V, cn->prm, NULL, D1->V);
}
/*
 * hec_fp_smul_2e128mc_bk_jac_glv4: scalar multiplication D1 := [kn]*D2 on the
 * Jacobian of a genus-2 curve over F_p, p = 2^128 - c, using a 4-dimensional
 * GLV decomposition.  Outline:
 *   1. split the scalar kn into four mini-scalars k0..k3,
 *   2. compute d_i = phi^i(D2) via the efficient endomorphism phi,
 *   3. precompute all sums of subsets of {d0..d3} in a 16-entry table,
 *      normalized with a single shared field inversion,
 *   4. run a joint left-to-right double-and-add over the four mini-scalars.
 * NOTE(review): the decomposition identity (k congruent to a combination of
 * the k_i and the endomorphism eigenvalue mod the group order N) depends on
 * the precomputed constants ah1..ah4, A1..A4, N in cn; verify against the
 * parameter generator.
 */
void hec_fp_smul_2e128mc_bk_jac_glv4(DIV_hec_fp_2e128mc D1, const uni kn, DIV_hec_fp_2e128mc D2, CNS_hec_fp_2e128mc_glv4 cn)
{
/* stack buffers backing the multiprecision temporaries declared below */
uni_t y1s[4*FP_LEN], y2s[4*FP_LEN], y3s[4*FP_LEN], y4s[4*FP_LEN], rs[4*FP_LEN], ts[4*FP_LEN], k0s[4*FP_LEN], k1s[4*FP_LEN], k2s[4*FP_LEN], k3s[4*FP_LEN], Ks[4*FP_LEN], Ts[4*FP_LEN];
MI_t y1, y2, y3, y4, ah1, ah2, ah3, ah4, N, r, k, t, A1, A2, A3, A4, k0, k1, k2, k3, K, T, Nt;
DIV_hec_fp_2e128mc_t d0, d1, d2, d3, tbl[16];
TEMP_hec_fp_2e128mc_t w;
int j, ei, b, bt;
/* local working constants: the group-law routines read -a2 and a3 from w->cn */
w->cn->prm = cn->prm;
fp_neg_2e128mc_x8664_asm(w->cn->a2, w->cn->prm, NULL, cn->a2);
fp_cpy_2e128mc_x8664_asm(w->cn->a3, w->cn->prm, NULL, cn->a3);
/*uni_t i; fp_cnt_bits(&i, kn, FP_LEN*2); printf("%d ", i);*/
/* bind the precomputed signed constants and the input scalar as MI views
 * (no copying; n/l/s are limb pointer, limb length, and sign) */
ah1->v->n = (uni)cn->ah1; ah1->v->l = 3*FP_LEN/2; ah1->s = NEGATIVE;
ah2->v->n = (uni)cn->ah2; ah2->v->l = 3*FP_LEN/2; ah2->s = NEGATIVE;
ah3->v->n = (uni)cn->ah3; ah3->v->l = 3*FP_LEN/2; ah3->s = NEGATIVE;
ah4->v->n = (uni)cn->ah4; ah4->v->l = 3*FP_LEN/2; ah4->s = NEGATIVE;
A1->v->n = (uni)cn->A1; A1->v->l = FP_LEN/2; A1->s = POSITIVE;
A2->v->n = (uni)cn->A2; A2->v->l = FP_LEN/2; A2->s = POSITIVE;
A3->v->n = (uni)cn->A3; A3->v->l = FP_LEN/2; A3->s = NEGATIVE;
A4->v->n = (uni)cn->A4; A4->v->l = FP_LEN/2; A4->s = NEGATIVE;
N->v->n = (uni)cn->N; N->v->l = 2*FP_LEN; N->s = POSITIVE;
Nt->v->n = (uni)cn->Nhalf; Nt->v->l = 2*FP_LEN; Nt->s = POSITIVE;
k->v->n = kn; k->v->l = 2*FP_LEN; k->s = POSITIVE;
y1->v->n = y1s; y2->v->n = y2s; y3->v->n = y3s; y4->v->n = y4s;
k0->v->n = k0s; k1->v->n = k1s; k2->v->n = k2s; k3->v->n = k3s;
t->v->n = ts; r->v->n = rs; T->v->n = Ts; K->v->n = Ks;
/* y_i := round(ah_i * k / N): truncated quotient, adjusted by one toward
 * the nearest integer when |remainder| exceeds N/2 (Nt) */
mi_mul_stack(t, ah1, k);
mi_div_q_r_stack(y1, r, t, N);
if(mi_compare_abs_stack(r, Nt) == GREATER){
if(t->s == POSITIVE){
mi_add_1_stack(y1, y1, 1);
}
else{
mi_sub_1_stack(y1, y1, 1);
}
}
mi_mul_stack(t, ah2, k);
mi_div_q_r_stack(y2, r, t, N);
if(mi_compare_abs_stack(r, Nt) == GREATER){
if(t->s == POSITIVE){
mi_add_1_stack(y2, y2, 1);
}
else{
mi_sub_1_stack(y2, y2, 1);
}
}
mi_mul_stack(t, ah3, k);
mi_div_q_r_stack(y3, r, t, N);
if(mi_compare_abs_stack(r, Nt) == GREATER){
if(t->s == POSITIVE){
mi_add_1_stack(y3, y3, 1);
}
else{
mi_sub_1_stack(y3, y3, 1);
}
}
mi_mul_stack(t, ah4, k);
mi_div_q_r_stack(y4, r, t, N);
if(mi_compare_abs_stack(r, Nt) == GREATER){
if(t->s == POSITIVE){
mi_add_1_stack(y4, y4, 1);
}
else{
mi_sub_1_stack(y4, y4, 1);
}
}
/* mini-scalars k0..k3 from the rounded y_i; the trailing comments are the
 * original Magma formulas this code was transcribed from */
mi_mul_stack(K, A2, y4); //A:=a2*y4;
mi_mul_stack(T, A3, y3); //T:=a3*y3;
mi_add_stack(K, K, T); //A:=A+T;
mi_mul_stack(T, A4, y2); //T:=a4*y2;
mi_add_stack(K, K, T); //A:=A+T;
mi_mul_stack(k0, A1, y1); //k0:=a1*y1;
mi_mul_stack(T, A4, y3); //T:=a4*y3;
mi_add_stack(k0, k0, T); //k0:=k0+T;
mi_mul_stack(T, A3, y4); //T:=a3*y4;
mi_add_stack(k0, k0, T); //k0:=k0+T;
mi_sub_stack(k0, k0, K); //k0:=k0-A;
mi_sub_stack(k0, k, k0); //k0:=k-k0;
mi_mul_stack(k1, A1, y2); //k1:=a1*y2;
mi_mul_stack(T, A2, y1); //T:=a2*y1;
mi_add_stack(k1, k1, T); //k1:=k1+T;
mi_mul_stack(T, A4, y4); //T:=a4*y4;
mi_add_stack(k1, k1, T); //k1:=k1+T;
mi_sub_stack(k1, K, k1); //k1:=A-k1;
mi_mul_stack(k2, A1, y3); //k2:=a1*y3;
mi_mul_stack(T, A2, y2); //T:=a2*y2;
mi_add_stack(k2, k2, T); //k2:=k2+T;
mi_mul_stack(T, A3, y1); //T:=a3*y1;
mi_add_stack(k2, k2, T); //k2:=k2+T;
mi_sub_stack(k2, K, k2); //k2:=A-k2;
mi_mul_stack(k3, A2, y3); //k3:=a2*y3;
mi_mul_stack(T, A1, y4); //T:=a1*y4;
mi_add_stack(k3, k3, T); //k3:=k3+T;
mi_mul_stack(T, A4, y1); //T:=a4*y1;
mi_add_stack(k3, k3, T); //k3:=k3+T;
mi_mul_stack(T, A3, y2); //T:=a3*y2;
mi_add_stack(k3, k3, T); //k3:=k3+T;
mi_sub_stack(k3, K, k3); //k3:=A-k3;
/* d_i = phi^i(D2), i = 0..3 */
hec_fp_cpy_2e128mc_g2i_jac(w, d0, D2);
phi(d1, d0, cn);
phi(d2, d1, cn);
phi(d3, d2, cn);
/* fold the mini-scalar signs into the base divisors so every digit of the
 * joint expansion is non-negative */
if(k0->s == NEGATIVE){
hec_fp_neg_2e128mc_g2i_jac(w, d0, d0);
}
if(k1->s == NEGATIVE){
hec_fp_neg_2e128mc_g2i_jac(w, d1, d1);
}
if(k2->s == NEGATIVE){
hec_fp_neg_2e128mc_g2i_jac(w, d2, d2);
}
if(k3->s == NEGATIVE){
hec_fp_neg_2e128mc_g2i_jac(w, d3, d3);
}
/* build the 11 nontrivial multi-term sums of the d_i in tbl[0..10]; they are
 * reshuffled below so that tbl[b] = sum of d_i over the set bits i of b */
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, d0);
hec_fp_aadd_2e128mc_g2i_jac(w, d1);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[0], w->D3); // tbl[0] = d0+d1
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, d0);
hec_fp_aadd_2e128mc_g2i_jac(w, d2);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[1], w->D3); // tbl[1] = d0+d2
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, d1);
hec_fp_aadd_2e128mc_g2i_jac(w, d2);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[2], w->D3); // tbl[2] = d1+d2
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, tbl[0]);
hec_fp_madd_2e128mc_g2i_jac(w, d2);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[3], w->D1); // tbl[3] = d0+d1+d2
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, d0);
hec_fp_aadd_2e128mc_g2i_jac(w, d3);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[4], w->D3); // tbl[4] = d0+d3
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, d1);
hec_fp_aadd_2e128mc_g2i_jac(w, d3);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[5], w->D3); // tbl[5] = d1+d3
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, tbl[0]);
hec_fp_madd_2e128mc_g2i_jac(w, d3);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[6], w->D1); // tbl[6] = d0+d1+d3
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, d2);
hec_fp_aadd_2e128mc_g2i_jac(w, d3);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[7], w->D3); // tbl[7] = d2+d3
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, tbl[1]);
hec_fp_madd_2e128mc_g2i_jac(w, d3);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[8], w->D1); // tbl[8] = d0+d2+d3
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, tbl[2]);
hec_fp_madd_2e128mc_g2i_jac(w, d3);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[9], w->D1); // tbl[9] = d1+d2+d3
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, tbl[3]);
hec_fp_madd_2e128mc_g2i_jac(w, d3);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[10], w->D1); // tbl[10] = d0+d1+d2+d3
//We first switch to homogeneous projective coordinates (without W or V=W^2) to save field operations and space.
fp_mul_2e128mc_x8664_asm(tbl[0]->W, w->cn->prm, NULL, tbl[0]->Z, tbl[0]->W); //W := W*Z
fp_sqr_2e128mc_x8664_asm(tbl[0]->Z, w->cn->prm, NULL, tbl[0]->Z); //Z := Z^2
fp_mul_2e128mc_x8664_asm(tbl[0]->S, w->cn->prm, NULL, tbl[0]->S, tbl[0]->Z); //S := S*Z^2
fp_mul_2e128mc_x8664_asm(tbl[0]->R, w->cn->prm, NULL, tbl[0]->R, tbl[0]->W); //R := R*W*Z
fp_mul_2e128mc_x8664_asm(tbl[0]->W, w->cn->prm, NULL, tbl[0]->Z, tbl[0]->W); //W := W*Z^3
fp_mul_2e128mc_x8664_asm(tbl[0]->Q, w->cn->prm, NULL, tbl[0]->Q, tbl[0]->W); //Q := Q*W*Z^3
fp_mul_2e128mc_x8664_asm(tbl[0]->Z, w->cn->prm, NULL, tbl[0]->Z, tbl[0]->W); //Z := W*Z^5
fp_cpy_2e128mc_x8664_asm(tbl[0]->V, w->cn->prm, NULL, tbl[0]->Z); //V is accumulator now, W is temp now.
for(j = 1; j < 11; j++){
fp_mul_2e128mc_x8664_asm(tbl[j]->W, w->cn->prm, NULL, tbl[j]->Z, tbl[j]->W); //W := W*Z
fp_sqr_2e128mc_x8664_asm(tbl[j]->Z, w->cn->prm, NULL, tbl[j]->Z); //Z := Z^2
fp_mul_2e128mc_x8664_asm(tbl[j]->S, w->cn->prm, NULL, tbl[j]->S, tbl[j]->Z); //S := S*Z^2
fp_mul_2e128mc_x8664_asm(tbl[j]->R, w->cn->prm, NULL, tbl[j]->R, tbl[j]->W); //R := R*W*Z
fp_mul_2e128mc_x8664_asm(tbl[j]->W, w->cn->prm, NULL, tbl[j]->Z, tbl[j]->W); //W := W*Z^3
fp_mul_2e128mc_x8664_asm(tbl[j]->Q, w->cn->prm, NULL, tbl[j]->Q, tbl[j]->W); //Q := Q*W*Z^3
fp_mul_2e128mc_x8664_asm(tbl[j]->Z, w->cn->prm, NULL, tbl[j]->Z, tbl[j]->W); //Z := W*Z^5
fp_mul_2e128mc_x8664_asm(tbl[j]->V, w->cn->prm, NULL, tbl[j]->Z, tbl[j-1]->V); // running product of the Z's
}
/* normalize all 11 entries with one inversion (Montgomery's simultaneous
 * inversion): invert the full product, then back-substitute per entry */
fp_inv_2e128mc_x8664(tbl[11-1]->V, w->cn->prm, NULL, tbl[11-1]->V);
for(j = 11-1; j > 0; j--){
fp_mul_2e128mc_x8664_asm(tbl[j]->W, w->cn->prm, NULL, tbl[j-1]->V, tbl[11-1]->V); // W = 1/Z_j
fp_mul_2e128mc_x8664_asm(tbl[11-1]->V, w->cn->prm, NULL, tbl[j]->Z, tbl[11-1]->V); // drop Z_j from the inverse product
fp_mul_2e128mc_x8664_asm(tbl[j]->Q, w->cn->prm, NULL, tbl[j]->W, tbl[j]->Q);
fp_mul_2e128mc_x8664_asm(tbl[j]->R, w->cn->prm, NULL, tbl[j]->W, tbl[j]->R);
fp_mul_2e128mc_x8664_asm(tbl[j]->S, w->cn->prm, NULL, tbl[j]->W, tbl[j]->S);
fp_mul_2e128mc_x8664_asm(tbl[j]->T, w->cn->prm, NULL, tbl[j]->W, tbl[j]->T);
}
fp_mul_2e128mc_x8664_asm(tbl[0]->Q, w->cn->prm, NULL, tbl[11-1]->V, tbl[0]->Q);
fp_mul_2e128mc_x8664_asm(tbl[0]->R, w->cn->prm, NULL, tbl[11-1]->V, tbl[0]->R);
fp_mul_2e128mc_x8664_asm(tbl[0]->S, w->cn->prm, NULL, tbl[11-1]->V, tbl[0]->S);
fp_mul_2e128mc_x8664_asm(tbl[0]->T, w->cn->prm, NULL, tbl[11-1]->V, tbl[0]->T);
/* reset the projective coordinates of every normalized entry to 1 */
for(j = 0; j < 11; j++){
fp_st1_2e128mc_x8664_asm(tbl[j]->Z, w->cn->prm, NULL, 1);
fp_st1_2e128mc_x8664_asm(tbl[j]->W, w->cn->prm, NULL, 1);
fp_st1_2e128mc_x8664_asm(tbl[j]->V, w->cn->prm, NULL, 1);
}
/* reshuffle so the table is indexed by the 4-bit joint digit b:
 * tbl[b] = sum of d_i over set bits i of b; tbl[0] marks the identity */
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[15], tbl[10]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[14], tbl[9]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[13], tbl[8]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[12], tbl[7]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[11], tbl[6]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[10], tbl[5]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[9], tbl[4]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[8], d3);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[7], tbl[3]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[6], tbl[2]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[5], tbl[1]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[4], d2);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[3], tbl[0]);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[2], d1);
hec_fp_cpy_2e128mc_g2i_jac(w, tbl[1], d0);
fp_st1_2e128mc_x8664_asm(tbl[0]->Z, cn->prm, NULL, 0); //Marker for the identity element.
fp_st1_2e128mc_x8664_asm(tbl[0]->W, cn->prm, NULL, 0); //Marker for the identity element.
/*TODO: The following lines solve a minor problem caused by 1-bit-longer-than-expected mini-scalars at low-level. This can be made prettier. */
/* bt := max bit length over the four mini-scalars */
fp_cnt_bits(&bt, k0->v->n, k0->v->l);
fp_cnt_bits(&b, k1->v->n, k1->v->l);
if(b > bt){ bt = b; }
fp_cnt_bits(&b, k2->v->n, k2->v->l);
if(b > bt){ bt = b; }
fp_cnt_bits(&b, k3->v->n, k3->v->l);
if(b > bt){ bt = b; }
/* zero the guard limb past each mini-scalar so reading bit bt is safe */
k0->v->n[k0->v->l] = 0;
k1->v->n[k1->v->l] = 0;
k2->v->n[k2->v->l] = 0;
k3->v->n[k3->v->l] = 0;
int bp;
/* joint double-and-add, scanning from the top bit; digit b packs bit j of
 * (k3,k2,k1,k0).  NOTE(review): the loop stops at j == 1 and the top digit
 * is read at j == bt, so mam_ith_bit indexing appears to be 1-based --
 * confirm against its definition. */
j = bt;
mam_ith_bit(ei, k3->v->n, j); b = ei;
mam_ith_bit(ei, k2->v->n, j); b = (b << 1) + ei;
mam_ith_bit(ei, k1->v->n, j); b = (b << 1) + ei;
mam_ith_bit(ei, k0->v->n, j); b = (b << 1) + ei;
hec_fp_cpy_2e128mc_g2i_jac(w, w->D1, tbl[b]);
bp = b;
//#pragma omp parallel for num_threads(2)
for(j = bt-1; j > 0; j--){
mam_ith_bit(ei, k3->v->n, j); b = ei;
mam_ith_bit(ei, k2->v->n, j); b = (b << 1) + ei;
mam_ith_bit(ei, k1->v->n, j); b = (b << 1) + ei;
mam_ith_bit(ei, k0->v->n, j); b = (b << 1) + ei;
/* NOTE(review): bp is set from the leading digit and never updated inside
 * this loop, so this branch only distinguishes digits equal to the leading
 * one.  It looks like a guard against the degenerate case where the merged
 * double-and-add would add the accumulator to itself -- confirm intent. */
if(b == bp){
hec_fp_dbl_2e128mc_g2i_jac_a2a3is0(w); // explicit double ...
hec_fp_madd_2e128mc_g2i_jac(w, tbl[b]); // ... then mixed add
}else{
hec_fp_mdbladd_2e128mc_g2i_jac(w, tbl[b]); // merged double-and-add
}
}
/* write the accumulator out to the caller's divisor */
hec_fp_cpy_2e128mc_g2i_jac(w, D1, w->D1);
}
|
common_cpu.h | #ifndef COMMON_CPU
#define COMMON_CPU
#if defined(_OPENMP)
#include <omp.h>
#endif
// Invoke the functor once per index, sequentially: functor(0) ... functor(N-1).
template <typename FunctorT>
void iterate_cpu(FunctorT functor, int N) {
  int i = 0;
  while (i < N) {
    functor(i);
    ++i;
  }
}
// Same per-index invocation as iterate_cpu, but distributed across an OpenMP
// team of n_threads when OpenMP is enabled; without OpenMP it degrades to a
// plain sequential loop and n_threads is unused.
template <typename FunctorT>
void iterate_omp_cpu(FunctorT functor, int N, int n_threads) {
#if defined(_OPENMP)
  omp_set_num_threads(n_threads);
#pragma omp parallel for
#endif
  for (int i = 0; i < N; ++i) {
    functor(i);
  }
}
#endif
|
GB_sparse_add_template.c | //------------------------------------------------------------------------------
// GB_sparse_add_template: C=A+B, C<M>=A+B when C is sparse/hypersparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is sparse or hypersparse:
// ------------------------------------------
// C = A + B
// ------------------------------------------
// sparse . sparse sparse
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse
// sparse sparse sparse bitmap
// sparse sparse sparse full
// sparse sparse bitmap sparse
// sparse sparse bitmap bitmap
// sparse sparse bitmap full
// sparse sparse full sparse
// sparse sparse full bitmap
// sparse sparse full full
// sparse bitmap sparse sparse
// sparse full sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// If all four matrices are sparse/hypersparse, and C<!M>=A+B is being
// computed, then M is passed in as NULL to GB_add_phase*. GB_add_sparsity
// returns apply_mask as false. The methods below do not handle the case when
// C is sparse, M is sparse, and !M is used. All other uses of !M when M
// is sparse result in a bitmap structure for C, and this is handled by
// GB_bitmap_add_template.
// For this case: the mask is done later, so C=A+B is computed here:
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (mask later)
{
#ifdef GB_DEBUG
if (M == NULL || M_is_bitmap || M_is_full)
{
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
}
#endif
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,j)
// phase2: compute C
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < C_ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast == -1) ;
int64_t len ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
len = TaskList [taskid].len ;
}
else
{
// a coarse task operates on one or more whole vectors
len = vlen ;
}
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of C
//------------------------------------------------------------------
int64_t j = GBH (Ch, k) ;
#if defined ( GB_PHASE_1_OF_2 )
int64_t cjnz = 0 ;
#else
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,j)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task.
pC = Cp [k ] ;
pC_end = Cp [k+1] ;
}
int64_t cjnz = pC_end - pC ;
if (cjnz == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,j)
//------------------------------------------------------------------
int64_t pA = -1, pA_end = -1 ;
if (fine_task)
{
// A fine task operates on Ai,Ax [pA...pA_end-1], which is
// a subset of the vector A(:,j)
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// A coarse task operates on the entire vector A (:,j)
int64_t kA = (C_to_A == NULL) ? j : C_to_A [k] ;
if (kA >= 0)
{
pA = GBP (Ap, kA, vlen) ;
pA_end = GBP (Ap, kA+1, vlen) ;
}
}
int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice
int64_t pA_start = pA ;
bool adense = (ajnz == len) ;
// get the first and last indices in A(:,j) for this vector
int64_t iA_first = -1, iA_last = -1 ;
if (ajnz > 0)
{
iA_first = GBI (Ai, pA, vlen) ;
iA_last = GBI (Ai, pA_end-1, vlen) ;
}
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
int64_t pB = -1, pB_end = -1 ;
if (fine_task)
{
// A fine task operates on Bi,Bx [pB...pB_end-1], which is
// a subset of the vector B(:,j)
pB = TaskList [taskid].pB ;
pB_end = TaskList [taskid].pB_end ;
}
else
{
// A coarse task operates on the entire vector B (:,j)
int64_t kB = (C_to_B == NULL) ? j : C_to_B [k] ;
if (kB >= 0)
{
pB = GBP (Bp, kB, vlen) ;
pB_end = GBP (Bp, kB+1, vlen) ;
}
}
int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice
int64_t pB_start = pB ;
bool bdense = (bjnz == len) ;
// get the first and last indices in B(:,j) for this vector
int64_t iB_first = -1, iB_last = -1 ;
if (bjnz > 0)
{
iB_first = GBI (Bi, pB, vlen) ;
iB_last = GBI (Bi, pB_end-1, vlen) ;
}
//------------------------------------------------------------------
// get M(:,j) if M is sparse or hypersparse
//------------------------------------------------------------------
bool sparse_mask_is_easy = false ;
int64_t pM = -1 ;
int64_t pM_end = -1 ;
if (M_is_sparse_or_hyper)
{
if (fine_task)
{
// A fine task operates on Mi,Mx [pM...pM_end-1],
// which is a subset of the vector M(:,j)
pM = TaskList [taskid].pM ;
pM_end = TaskList [taskid].pM_end ;
}
else
{
int64_t kM = -1 ;
if (Ch_is_Mh)
{
// Ch is the same as Mh (a deep copy)
ASSERT (Ch != NULL) ;
ASSERT (M_is_hyper) ;
ASSERT (Ch [k] == M->h [k]) ;
kM = k ;
}
else
{
kM = (C_to_M == NULL) ? j : C_to_M [k] ;
}
if (kM >= 0)
{
pM = GBP (Mp, kM , vlen) ;
pM_end = GBP (Mp, kM+1, vlen) ;
}
}
// The "easy mask" condition requires M to be sparse/hyper
// and structural. A and B cannot be bitmap. Also one of
// the following 3 conditions must hold:
// (1) all entries are present in A(:,j) and B == M
// (2) all entries are present in B(:,j) and A == M
// (3) both A and B are aliased to M
sparse_mask_is_easy =
Mask_struct && // M must be structural
!A_is_bitmap && // A must not be bitmap
!B_is_bitmap && // B must not be bitmap
((adense && B == M) || // one of 3 conditions holds
(bdense && A == M) ||
(A == M && B == M)) ;
// TODO: add the condition above to GB_add_sparsity,
// where adense/bdense are true for the whole matrix
// (adense is true if A is full, or sparse/hypersparse with
// all entries present). The test here is done vector by
// vector, for each A(:,j) and B(:,j). This is a finer grain
// test, as compared to a test for all of A and B.
}
//------------------------------------------------------------------
// C(:,j)<optional mask> = A (:,j) + B (:,j) or subvector
//------------------------------------------------------------------
if (M == NULL)
{
//--------------------------------------------------------------
// M is not present, or !M is sparse but not applied here
//--------------------------------------------------------------
// ------------------------------------------
// C = A + B
// ------------------------------------------
// sparse . sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (mask later)
// If all four matrices are sparse or hypersparse, and
// Mask_comp is true, the mask M is passed in to this method as
// NULL. C=A+B is computed with no mask, and !M is applied
// later.
// A and B are both sparse or hypersparse, not bitmap or
// full, but individual vectors of A and B might have all
// entries present (adense and/or bdense).
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
#if defined ( GB_PHASE_1_OF_2 )
if (A_and_B_are_disjoint)
{
// only used by GB_wait, which computes A+T where T is the
// matrix of pending tuples for A. The pattern of pending
// tuples is always disjoint with the pattern of A.
cjnz = ajnz + bjnz ;
}
else
#endif
if (adense && bdense)
{
//----------------------------------------------------------
// Method01: A(:,j) and B(:,j) dense: thus C(:,j) dense
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = p + iA_first ;
Ci [pC + p] = i ;
ASSERT (Ai [pA + p] == i) ;
ASSERT (Bi [pB + p] == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA + p, A_iso) ;
GB_GETB (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC + p), aij, bij, i, j) ;
#endif
}
#endif
}
else if (adense)
{
//----------------------------------------------------------
// Method02: A(:,j) dense, B(:,j) sparse: C(:,j) dense
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
int64_t i = p + iA_first ;
Ci [pC + p] = i ;
ASSERT (Ai [pA + p] == i) ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
#endif
}
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Bi [pB + p] ;
int64_t ii = i - iA_first ;
ASSERT (Ai [pA + ii] == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA + ii, A_iso) ;
GB_GETB (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC + ii), aij, bij, i, j) ;
#endif
}
#endif
}
else if (bdense)
{
//----------------------------------------------------------
// Method03: A(:,j) sparse, B(:,j) dense: C(:,j) dense
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
int64_t i = p + iB_first ;
Ci [pC + p] = i ;
ASSERT (Bi [pB + p] == i) ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
#endif
}
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Ai [pA + p] ;
int64_t ii = i - iB_first ;
ASSERT (Bi [pB + ii] == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA + p, A_iso) ;
GB_GETB (bij, Bx, pB + ii, B_iso) ;
GB_BINOP (GB_CX (pC + ii), aij, bij, i, j) ;
#endif
}
#endif
}
else if (ajnz == 0)
{
//----------------------------------------------------------
// Method04: A(:,j) is empty
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
}
#endif
#endif
}
else if (bjnz == 0)
{
//----------------------------------------------------------
// Method05: B(:,j) is empty
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
}
#endif
#endif
}
else if (iA_last < iB_first)
{
//----------------------------------------------------------
// Method06: last A(:,j) comes before 1st B(:,j)
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz + bjnz ;
#else
ASSERT (cjnz == ajnz + bjnz) ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
}
#endif
pC += ajnz ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
}
#endif
#endif
}
else if (iB_last < iA_first)
{
//----------------------------------------------------------
// Method07: last B(:,j) comes before 1st A(:,j)
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz + bjnz ;
#else
ASSERT (cjnz == ajnz + bjnz) ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
}
#endif
pC += bjnz ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
}
#endif
#endif
}
#if defined ( GB_PHASE_1_OF_2 )
else if (ajnz > 32 * bjnz)
{
//----------------------------------------------------------
// Method08: A(:,j) is much denser than B(:,j)
//----------------------------------------------------------
// cjnz = ajnz + bjnz - nnz in the intersection
cjnz = ajnz + bjnz ;
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
// find i in A(:,j)
int64_t pright = pA_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
if (found) cjnz-- ;
}
}
else if (bjnz > 32 * ajnz)
{
//----------------------------------------------------------
// Method09: B(:,j) is much denser than A(:,j)
//----------------------------------------------------------
// cjnz = ajnz + bjnz - nnz in the intersection
cjnz = ajnz + bjnz ;
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
// find i in B(:,j)
int64_t pright = pB_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
if (found) cjnz-- ;
}
}
#endif
else
{
//----------------------------------------------------------
// Method10: A(:,j) and B(:,j) about the same sparsity
//----------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
// C (iA,j) = A (iA,j)
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
#endif
pA++ ;
}
else if (iA > iB)
{
// C (iB,j) = B (iB,j)
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
#endif
pB++ ;
}
else
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA, A_iso) ;
GB_GETB (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
#endif
#endif
pA++ ;
pB++ ;
}
#if defined ( GB_PHASE_2_OF_2 )
pC++ ;
#else
cjnz++ ;
#endif
}
//----------------------------------------------------------
// A (:,j) or B (:,j) have entries left; not both
//----------------------------------------------------------
ajnz = (pA_end - pA) ;
bjnz = (pB_end - pB) ;
ASSERT (ajnz == 0 || bjnz == 0) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz += ajnz + bjnz ;
#else
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
}
#endif
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
}
#endif
ASSERT (pC + ajnz + bjnz == pC_end) ;
#endif
}
}
else if (sparse_mask_is_easy)
{
//--------------------------------------------------------------
// special case: M is present and very easy to use
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse
// sparse sparse sparse full
// sparse sparse full sparse
// sparse sparse full full
// A and B are sparse, hypersparse or full, not bitmap.
ASSERT (!A_is_bitmap) ;
ASSERT (!B_is_bitmap) ;
ASSERT (Mask_struct) ;
int64_t mjnz = pM_end - pM ; // nnz (M (:,j))
#if defined ( GB_PHASE_1_OF_2 )
// M is structural, and sparse or hypersparse, so every entry
// in the mask is guaranteed to appear in A+B. The symbolic
// count is thus trivial.
cjnz = mjnz ;
#else
// copy the pattern into C (:,j)
int64_t pC_start = pC ;
int64_t pM_start = pM ;
memcpy (Ci + pC, Mi + pM, mjnz * sizeof (int64_t)) ;
int64_t pA_offset = pA_start - iA_first ;
int64_t pB_offset = pB_start - iB_first ;
if (adense && B == M)
{
//----------------------------------------------------------
// Method11: A dense, B == M
//----------------------------------------------------------
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
int64_t i = Mi [pM] ;
ASSERT (GB_mcast (Mx, pM, msize)) ;
ASSERT (GBI (Ai, pA_offset + i, vlen) == i) ;
ASSERT (GBI (Bi, pM, vlen) == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA_offset + i, A_iso) ;
GB_GETB (bij, Bx, pM, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
}
}
else if (bdense && A == M)
{
//----------------------------------------------------------
// Method12: B dense, A == M
//----------------------------------------------------------
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
int64_t i = Mi [pM] ;
ASSERT (GB_mcast (Mx, pM, msize)) ;
ASSERT (GBI (Ai, pM, vlen) == i) ;
ASSERT (GBI (Bi, pB_offset + i, vlen) == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pM, A_iso) ;
GB_GETB (bij, Bx, pB_offset + i, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
}
}
else // (A == M) && (B == M)
{
//----------------------------------------------------------
// Method13: A == M == B: all three matrices the same
//----------------------------------------------------------
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
#if GB_OP_IS_SECOND
GB_GETB (t, Bx, pM, B_iso) ;
#else
GB_GETA (t, Ax, pM, A_iso) ;
#endif
GB_BINOP (GB_CX (pC), t, t, Mi [pM], j) ;
}
#endif
}
#endif
}
else if (M_is_sparse_or_hyper)
{
//--------------------------------------------------------------
// Method14: C and M are sparse or hypersparse
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (*)
// sparse sparse sparse bitmap (*)
// sparse sparse sparse full (*)
// sparse sparse bitmap sparse (*)
// sparse sparse bitmap bitmap (+)
// sparse sparse bitmap full (+)
// sparse sparse full sparse (*)
// sparse sparse full bitmap (+)
// sparse sparse full full (+)
// (*) This method is efficient except when either A or B are
// sparse, and when M is sparse but with many entries. When M
// is sparse and either A or B are sparse, the method is
// designed to be very efficient when M is very sparse compared
// with A and/or B. It traverses all entries in the sparse M,
// and (for sparse A or B) does a binary search for entries in
// A or B. In that case, if M has many entries, the mask M
// should be ignored, and C=A+B should be computed without any
// mask. The test for when to use M here should ignore A or B
// if they are bitmap or full.
// (+) TODO: if C and M are sparse/hyper, and A and B are
// both bitmap/full, then use GB_emult_03_template instead,
// but with (Ab [p] || Bb [p]) instead of (Ab [p] && Bb [p]).
// A and B can have any sparsity pattern (hypersparse,
// sparse, bitmap, or full).
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// get M(i,j) for A(i,j) + B (i,j)
//----------------------------------------------------------
int64_t i = Mi [pM] ;
bool mij = GB_mcast (Mx, pM, msize) ;
if (!mij) continue ;
//----------------------------------------------------------
// get A(i,j)
//----------------------------------------------------------
bool afound ;
if (adense)
{
// A is dense, bitmap, or full; use quick lookup
pA = pA_start + (i - iA_first) ;
afound = GBB (Ab, pA) ;
}
else if (A == M)
{
// A is aliased to M
pA = pM ;
afound = true ;
}
else
{
// A is sparse; use binary search. This is slow unless
// M is very sparse compared with A.
int64_t apright = pA_end - 1 ;
GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
}
ASSERT (GB_IMPLIES (afound, GBI (Ai, pA, vlen) == i)) ;
//----------------------------------------------------------
// get B(i,j)
//----------------------------------------------------------
bool bfound ;
if (bdense)
{
// B is dense; use quick lookup
pB = pB_start + (i - iB_first) ;
bfound = GBB (Bb, pB) ;
}
else if (B == M)
{
// B is aliased to M
pB = pM ;
bfound = true ;
}
else
{
// B is sparse; use binary search. This is slow unless
// M is very sparse compared with B.
int64_t bpright = pB_end - 1 ;
GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
}
ASSERT (GB_IMPLIES (bfound, GBI (Bi, pB, vlen) == i)) ;
//----------------------------------------------------------
// C(i,j) = A(i,j) + B(i,j)
//----------------------------------------------------------
if (afound && bfound)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA, A_iso) ;
GB_GETB (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
pC++ ;
#endif
}
else if (afound)
{
// C (i,j) = A (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
else if (bfound)
{
// C (i,j) = B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else
{
//--------------------------------------------------------------
// M is bitmap or full, for either C<M>=A+B or C<!M>=A+B
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// This method is very efficient for any mask, and should
// always be used if M is bitmap or full, even if the mask must
// also be applied later in GB_mask or GB_accum_mask.
// Exploiting the mask here adds no extra search time, and it
// reduces the size of C on output.
// GB_GET_MIJ: get M(i,j) where M is bitmap or full
#undef GB_GET_MIJ
#define GB_GET_MIJ(i) \
int64_t pM = pM_start + i ; \
bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; \
if (Mask_comp) mij = !mij ;
// A and B are sparse or hypersparse, not bitmap or full,
// but individual vectors of A and B might have all entries
// present (adense and/or bdense).
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
int64_t pM_start = j * vlen ;
if (adense && bdense)
{
//----------------------------------------------------------
// Method15: A(:,j) and B(:,j) dense, M bitmap/full
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
int64_t i = p + iA_first ;
ASSERT (Ai [pA + p] == i) ;
ASSERT (Bi [pB + p] == i) ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA + p, A_iso) ;
GB_GETB (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
pC++ ;
#endif
}
}
}
else if (ajnz == 0)
{
//----------------------------------------------------------
// Method16: A(:,j) is empty, M bitmap/full
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
}
else if (bjnz == 0)
{
//----------------------------------------------------------
// Method17: B(:,j) is empty, M bitmap/full
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
}
}
else if (iA_last < iB_first)
{
//----------------------------------------------------------
// Method18:last A(:,j) before 1st B(:,j), M bitmap/full
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
}
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
}
else if (iB_last < iA_first)
{
//----------------------------------------------------------
// Method19:last B(:,j) before 1st A(:,j), M bitmap/full
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
}
}
else
{
//----------------------------------------------------------
// Method20: merge A(:,j) and B(:,j), M bitmap/full
//----------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
GB_GET_MIJ (iA) ;
if (mij)
{
// C (iA,j) = A (iA,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
pA++ ;
}
else if (iA > iB)
{
GB_GET_MIJ (iB) ;
if (mij)
{
// C (iB,j) = B (iB,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
pB++ ;
}
else
{
GB_GET_MIJ (iB) ;
if (mij)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA, A_iso) ;
GB_GETB (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
#endif
pC++ ;
#endif
}
pA++ ;
pB++ ;
}
}
//----------------------------------------------------------
// A (:,j) or B (:,j) have entries left; not both
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t iA = Ai [pA] ;
GB_GET_MIJ (iA) ;
if (mij)
{
// C (iA,j) = A (iA,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
}
for ( ; pB < pB_end ; pB++)
{
int64_t iB = Bi [pB] ;
GB_GET_MIJ (iB) ;
if (mij)
{
// C (iB,j) = B (iB,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
}
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (fine_task)
{
TaskList [taskid].pC = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
#endif
}
}
}
|
3622.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
/* Array initialization: x gets multiples of pi, A gets the standard
   atax pattern A[i][j] = i*(j+1)/nx. */
static
void init_array (int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  int row, col;

  /* x[col] = col * pi */
  for (col = 0; col < ny; col++)
    x[col] = col * M_PI;

  /* A[row][col] = row*(col+1)/nx */
  for (row = 0; row < nx; row++)
    for (col = 0; col < ny; col++)
      A[row][col] = ((DATA_TYPE) row * (col + 1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code: scans (prints) the entire live-out vector y so the compiler
   cannot eliminate the kernel; also usable to check output correctness.
   Emits a newline every 20 values. */
static
void print_array(int nx,
                 DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
  int idx;

  for (idx = 0; idx < nx; idx++)
    {
      fprintf (stderr, DATA_PRINTF_MODIFIER, y[idx]);
      if (idx % 20 == 0)
        fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: y = A^T * (A * x), via tmp = A * x.
   The whole function is timed, including the call and return.
   Fixes vs. the original:
   - 'schedule(dynamic, 4)' is not a valid clause on
     'omp target teams distribute' (the distribute clause is
     'dist_schedule'), so it has been removed.
   - The original parallelized the y accumulation over i, so multiple
     iterations raced on y[j]; the accumulation is now parallelized
     over j (loop interchange), giving each y[j] a single writer.
     The sequential result is identical.
   - The inner index is explicitly privatized ('i'/'j' are declared at
     function scope and would otherwise be shared across iterations). */
static
void kernel_atax(int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny),
                 DATA_TYPE POLYBENCH_1D(y,NY,ny),
                 DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;
#pragma scop
  {
    /* Zero the output vector. */
#pragma omp target teams distribute parallel for thread_limit(256)
    for (i = 0; i < _PB_NY; i++)
      {
        y[i] = 0;
      }
    /* tmp = A * x: each row i is independent. */
#pragma omp target teams distribute parallel for thread_limit(256) private(j)
    for (i = 0; i < _PB_NX; i++)
      {
        tmp[i] = 0;
        for (j = 0; j < _PB_NY; j++)
          tmp[i] = tmp[i] + A[i][j] * x[j];
      }
    /* y = A^T * tmp: parallel over j so each y[j] has one writer. */
#pragma omp target teams distribute parallel for thread_limit(256) private(i)
    for (j = 0; j < _PB_NY; j++)
      {
        for (i = 0; i < _PB_NX; i++)
          y[j] = y[j] + A[i][j] * tmp[i];
      }
  }
#pragma endscop
}
/* Benchmark driver: allocate, initialize, time the kernel, print the
   live-out data, free.  argc/argv are unused (sizes come from NX/NY). */
int main(int argc, char** argv)
{
/* Retrieve problem size (compile-time constants from atax.h). */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation via PolyBench macros. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize input arrays A and x (y and tmp are computed). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel (the timed region). */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean: release all heap-allocated arrays. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
reduction-9.c | char z[10] = { 0 };
/* OpenMP array-section reduction testcase.
   Exercises reductions over array sections of: a pointer-to-array
   parameter (x), a plain pointer (y), a fixed-size array parameter (w),
   the file-scope array z, a VLA (a), and a whole-array reduction on a
   VLA (b).  Section lengths are runtime values (p1..p7) on purpose.
   noinline/noclone keeps the call boundary intact so the runtime
   section lengths are not constant-folded away.
   Self-checks the |= and max reductions at the end; the caller checks
   x, y, z, and w. */
__attribute__((noinline, noclone)) void
foo (int (*x)[3][2], int *y, long w[1][2], int p1, long p2, long p3, int p4,
int p5, long p6, short p7)
{
/* VLAs sized by the short parameter p7 (p7 == 5 in this test). */
unsigned long long a[p7 + 4];
short b[p7];
int i;
/* b starts below any i%k value so max-reduction must raise it;
   a starts at 0 for the |-reduction. */
for (i = 0; i < p7 + 4; i++)
{
if (i < p7)
b[i] = -6;
a[i] = 0;
}
/* Array-section reductions: + on x[0:p1+1][:p2] and z[:p3], * on
   y[:p4], | on a[:p5], & on w[0:p6-1][:p6], max on all of b. */
#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \
reduction(*:y[:p4]) reduction(|:a[:p5]) \
reduction(&:w[0:p6 - 1][:p6]) reduction(max:b)
for (i = 0; i < 128; i++)
{
x[i / 64][i % 3][(i / 4) & 1] += i;
if ((i & 15) == 1)
y[0] *= 3;
if ((i & 31) == 2)
y[1] *= 7;
if ((i & 63) == 3)
y[2] *= 17;
z[i / 32] += (i & 3);
if (i < 4)
z[i] += i;
a[i / 32] |= 1ULL << (i & 30);
w[0][i & 1] &= ~(1L << (i / 17 * 3));
if ((i % 79) > b[0])
b[0] = i % 79;
if ((i % 13) > b[1])
b[1] = i % 13;
if ((i % 23) > b[2])
b[2] = i % 23;
if ((i % 85) > b[3])
b[3] = i % 85;
if ((i % 192) > b[4])
b[4] = i % 192;
}
/* a[0..3] each collect even bit positions 0..30 -> 0x55555555;
   a[4..8] lie outside the reduced section and every loop index,
   so they must remain 0. */
for (i = 0; i < 9; i++)
if (a[i] != (i < 4 ? 0x55555555ULL : 0))
__builtin_abort ();
/* max over i in [0,128): i%79 -> 78, i%13 -> 12, i%23 -> 22,
   i%85 -> 84, i%192 -> 127. */
if (b[0] != 78 || b[1] != 12 || b[2] != 22 || b[3] != 84 || b[4] != 127)
__builtin_abort ();
}
/* Driver: run foo on interior slices of local accumulators and compare
   every live-out object against its precomputed expected value. */
int
main ()
{
  int acc[4][3][2] = {};
  static int acc_exp[4][3][2] = {{{ 0, 0 }, { 0, 0 }, { 0, 0 }},
			{{ 312, 381 }, { 295, 356 }, { 337, 335 }},
			{{ 1041, 975 }, { 1016, 1085 }, { 935, 1060 }},
			{{ 0, 0 }, { 0, 0 }, { 0, 0 }}};
  int prod[5] = { 0, 1, 1, 1, 0 };
  int prod_exp[5] = { 0, 6561, 2401, 289, 0 };
  char z_exp[10] = { 48, 49, 50, 51, 0, 0, 0, 0, 0, 0 };
  long mask[1][2] = { ~0L, ~0L };

  /* Pass interior slices so the array-section bounds in foo are
     exercised against non-zero offsets. */
  foo (&acc[1], prod + 1, mask, 1, 3L, 4L, 3, 4, 2L, 5);

  /* Check each reduced object separately. */
  if (__builtin_memcmp (acc, acc_exp, sizeof (acc)) != 0)
    __builtin_abort ();
  if (__builtin_memcmp (prod, prod_exp, sizeof (prod)) != 0)
    __builtin_abort ();
  if (__builtin_memcmp (z, z_exp, sizeof (z)) != 0)
    __builtin_abort ();
  if (mask[0][0] != ~0x249249L || mask[0][1] != ~0x249249L)
    __builtin_abort ();
  return 0;
}
|
convolution_3x3_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps(bias + p * 8) : _mm256_setzero_ps();
out.fill(_bias0);
for (int q = 0; q < inch; q++)
{
float* outptr = out;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* kptr = kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__m256 _sum00 = _mm256_loadu_ps(outptr);
__m256 _sum01 = _mm256_setzero_ps();
__m256 _sum10 = _mm256_loadu_ps(outptr + 8);
__m256 _sum11 = _mm256_setzero_ps();
__m256 _r000 = _mm256_broadcast_ss(r0 + 0);
__m256 _r001 = _mm256_broadcast_ss(r0 + 1);
__m256 _r002 = _mm256_broadcast_ss(r0 + 2);
__m256 _r003 = _mm256_broadcast_ss(r0 + 3);
__m256 _r004 = _mm256_broadcast_ss(r0 + 4);
__m256 _r005 = _mm256_broadcast_ss(r0 + 5);
__m256 _r006 = _mm256_broadcast_ss(r0 + 6);
__m256 _r007 = _mm256_broadcast_ss(r0 + 7);
__m256 _k00 = _mm256_loadu_ps(kptr);
__m256 _k01 = _mm256_loadu_ps(kptr + 8);
__m256 _k02 = _mm256_loadu_ps(kptr + 16);
__m256 _k03 = _mm256_loadu_ps(kptr + 24);
__m256 _k04 = _mm256_loadu_ps(kptr + 32);
__m256 _k05 = _mm256_loadu_ps(kptr + 40);
__m256 _k06 = _mm256_loadu_ps(kptr + 48);
__m256 _k07 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r000, _k00, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r001, _k01, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r002, _k02, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r003, _k03, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r004, _k04, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r005, _k05, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r006, _k06, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r007, _k07, _sum01);
__m256 _r010 = _mm256_broadcast_ss(r0 + 8);
__m256 _r011 = _mm256_broadcast_ss(r0 + 9);
__m256 _r012 = _mm256_broadcast_ss(r0 + 10);
__m256 _r013 = _mm256_broadcast_ss(r0 + 11);
__m256 _r014 = _mm256_broadcast_ss(r0 + 12);
__m256 _r015 = _mm256_broadcast_ss(r0 + 13);
__m256 _r016 = _mm256_broadcast_ss(r0 + 14);
__m256 _r017 = _mm256_broadcast_ss(r0 + 15);
_sum10 = _mm256_comp_fmadd_ps(_r010, _k00, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r011, _k01, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r012, _k02, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r013, _k03, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r014, _k04, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r015, _k05, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r016, _k06, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r017, _k07, _sum11);
__m256 _k10 = _mm256_loadu_ps(kptr);
__m256 _k11 = _mm256_loadu_ps(kptr + 8);
__m256 _k12 = _mm256_loadu_ps(kptr + 16);
__m256 _k13 = _mm256_loadu_ps(kptr + 24);
__m256 _k14 = _mm256_loadu_ps(kptr + 32);
__m256 _k15 = _mm256_loadu_ps(kptr + 40);
__m256 _k16 = _mm256_loadu_ps(kptr + 48);
__m256 _k17 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r010, _k10, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r011, _k11, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r012, _k12, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r013, _k13, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r014, _k14, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r015, _k15, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r016, _k16, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r017, _k17, _sum01);
__m256 _r020 = _mm256_broadcast_ss(r0 + 16);
__m256 _r021 = _mm256_broadcast_ss(r0 + 17);
__m256 _r022 = _mm256_broadcast_ss(r0 + 18);
__m256 _r023 = _mm256_broadcast_ss(r0 + 19);
__m256 _r024 = _mm256_broadcast_ss(r0 + 20);
__m256 _r025 = _mm256_broadcast_ss(r0 + 21);
__m256 _r026 = _mm256_broadcast_ss(r0 + 22);
__m256 _r027 = _mm256_broadcast_ss(r0 + 23);
_sum10 = _mm256_comp_fmadd_ps(_r020, _k10, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r021, _k11, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r022, _k12, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r023, _k13, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r024, _k14, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r025, _k15, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r026, _k16, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r027, _k17, _sum11);
__m256 _k20 = _mm256_loadu_ps(kptr);
__m256 _k21 = _mm256_loadu_ps(kptr + 8);
__m256 _k22 = _mm256_loadu_ps(kptr + 16);
__m256 _k23 = _mm256_loadu_ps(kptr + 24);
__m256 _k24 = _mm256_loadu_ps(kptr + 32);
__m256 _k25 = _mm256_loadu_ps(kptr + 40);
__m256 _k26 = _mm256_loadu_ps(kptr + 48);
__m256 _k27 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r020, _k20, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r021, _k21, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r022, _k22, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r023, _k23, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r024, _k24, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r025, _k25, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r026, _k26, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r027, _k27, _sum01);
__m256 _r030 = _mm256_broadcast_ss(r0 + 24);
__m256 _r031 = _mm256_broadcast_ss(r0 + 25);
__m256 _r032 = _mm256_broadcast_ss(r0 + 26);
__m256 _r033 = _mm256_broadcast_ss(r0 + 27);
__m256 _r034 = _mm256_broadcast_ss(r0 + 28);
__m256 _r035 = _mm256_broadcast_ss(r0 + 29);
__m256 _r036 = _mm256_broadcast_ss(r0 + 30);
__m256 _r037 = _mm256_broadcast_ss(r0 + 31);
_sum10 = _mm256_comp_fmadd_ps(_r030, _k20, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r031, _k21, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r032, _k22, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r033, _k23, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r034, _k24, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r035, _k25, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r036, _k26, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r037, _k27, _sum11);
__m256 _r100 = _mm256_broadcast_ss(r1 + 0);
__m256 _r101 = _mm256_broadcast_ss(r1 + 1);
__m256 _r102 = _mm256_broadcast_ss(r1 + 2);
__m256 _r103 = _mm256_broadcast_ss(r1 + 3);
__m256 _r104 = _mm256_broadcast_ss(r1 + 4);
__m256 _r105 = _mm256_broadcast_ss(r1 + 5);
__m256 _r106 = _mm256_broadcast_ss(r1 + 6);
__m256 _r107 = _mm256_broadcast_ss(r1 + 7);
__m256 _k30 = _mm256_loadu_ps(kptr);
__m256 _k31 = _mm256_loadu_ps(kptr + 8);
__m256 _k32 = _mm256_loadu_ps(kptr + 16);
__m256 _k33 = _mm256_loadu_ps(kptr + 24);
__m256 _k34 = _mm256_loadu_ps(kptr + 32);
__m256 _k35 = _mm256_loadu_ps(kptr + 40);
__m256 _k36 = _mm256_loadu_ps(kptr + 48);
__m256 _k37 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r100, _k30, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r101, _k31, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r102, _k32, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r103, _k33, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r104, _k34, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r105, _k35, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r106, _k36, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r107, _k37, _sum01);
__m256 _r110 = _mm256_broadcast_ss(r1 + 8);
__m256 _r111 = _mm256_broadcast_ss(r1 + 9);
__m256 _r112 = _mm256_broadcast_ss(r1 + 10);
__m256 _r113 = _mm256_broadcast_ss(r1 + 11);
__m256 _r114 = _mm256_broadcast_ss(r1 + 12);
__m256 _r115 = _mm256_broadcast_ss(r1 + 13);
__m256 _r116 = _mm256_broadcast_ss(r1 + 14);
__m256 _r117 = _mm256_broadcast_ss(r1 + 15);
_sum10 = _mm256_comp_fmadd_ps(_r110, _k30, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r111, _k31, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r112, _k32, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r113, _k33, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r114, _k34, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r115, _k35, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r116, _k36, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r117, _k37, _sum11);
__m256 _k40 = _mm256_loadu_ps(kptr);
__m256 _k41 = _mm256_loadu_ps(kptr + 8);
__m256 _k42 = _mm256_loadu_ps(kptr + 16);
__m256 _k43 = _mm256_loadu_ps(kptr + 24);
__m256 _k44 = _mm256_loadu_ps(kptr + 32);
__m256 _k45 = _mm256_loadu_ps(kptr + 40);
__m256 _k46 = _mm256_loadu_ps(kptr + 48);
__m256 _k47 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r110, _k40, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r111, _k41, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r112, _k42, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r113, _k43, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r114, _k44, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r115, _k45, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r116, _k46, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r117, _k47, _sum01);
__m256 _r120 = _mm256_broadcast_ss(r1 + 16);
__m256 _r121 = _mm256_broadcast_ss(r1 + 17);
__m256 _r122 = _mm256_broadcast_ss(r1 + 18);
__m256 _r123 = _mm256_broadcast_ss(r1 + 19);
__m256 _r124 = _mm256_broadcast_ss(r1 + 20);
__m256 _r125 = _mm256_broadcast_ss(r1 + 21);
__m256 _r126 = _mm256_broadcast_ss(r1 + 22);
__m256 _r127 = _mm256_broadcast_ss(r1 + 23);
_sum10 = _mm256_comp_fmadd_ps(_r120, _k40, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r121, _k41, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r122, _k42, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r123, _k43, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r124, _k44, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r125, _k45, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r126, _k46, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r127, _k47, _sum11);
__m256 _k50 = _mm256_loadu_ps(kptr);
__m256 _k51 = _mm256_loadu_ps(kptr + 8);
__m256 _k52 = _mm256_loadu_ps(kptr + 16);
__m256 _k53 = _mm256_loadu_ps(kptr + 24);
__m256 _k54 = _mm256_loadu_ps(kptr + 32);
__m256 _k55 = _mm256_loadu_ps(kptr + 40);
__m256 _k56 = _mm256_loadu_ps(kptr + 48);
__m256 _k57 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r120, _k50, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r121, _k51, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r122, _k52, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r123, _k53, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r124, _k54, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r125, _k55, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r126, _k56, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r127, _k57, _sum01);
__m256 _r130 = _mm256_broadcast_ss(r1 + 24);
__m256 _r131 = _mm256_broadcast_ss(r1 + 25);
__m256 _r132 = _mm256_broadcast_ss(r1 + 26);
__m256 _r133 = _mm256_broadcast_ss(r1 + 27);
__m256 _r134 = _mm256_broadcast_ss(r1 + 28);
__m256 _r135 = _mm256_broadcast_ss(r1 + 29);
__m256 _r136 = _mm256_broadcast_ss(r1 + 30);
__m256 _r137 = _mm256_broadcast_ss(r1 + 31);
_sum10 = _mm256_comp_fmadd_ps(_r130, _k50, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r131, _k51, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r132, _k52, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r133, _k53, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r134, _k54, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r135, _k55, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r136, _k56, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r137, _k57, _sum11);
__m256 _r200 = _mm256_broadcast_ss(r2 + 0);
__m256 _r201 = _mm256_broadcast_ss(r2 + 1);
__m256 _r202 = _mm256_broadcast_ss(r2 + 2);
__m256 _r203 = _mm256_broadcast_ss(r2 + 3);
__m256 _r204 = _mm256_broadcast_ss(r2 + 4);
__m256 _r205 = _mm256_broadcast_ss(r2 + 5);
__m256 _r206 = _mm256_broadcast_ss(r2 + 6);
__m256 _r207 = _mm256_broadcast_ss(r2 + 7);
__m256 _k60 = _mm256_loadu_ps(kptr);
__m256 _k61 = _mm256_loadu_ps(kptr + 8);
__m256 _k62 = _mm256_loadu_ps(kptr + 16);
__m256 _k63 = _mm256_loadu_ps(kptr + 24);
__m256 _k64 = _mm256_loadu_ps(kptr + 32);
__m256 _k65 = _mm256_loadu_ps(kptr + 40);
__m256 _k66 = _mm256_loadu_ps(kptr + 48);
__m256 _k67 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r200, _k60, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r201, _k61, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r202, _k62, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r203, _k63, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r204, _k64, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r205, _k65, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r206, _k66, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r207, _k67, _sum01);
__m256 _r210 = _mm256_broadcast_ss(r2 + 8);
__m256 _r211 = _mm256_broadcast_ss(r2 + 9);
__m256 _r212 = _mm256_broadcast_ss(r2 + 10);
__m256 _r213 = _mm256_broadcast_ss(r2 + 11);
__m256 _r214 = _mm256_broadcast_ss(r2 + 12);
__m256 _r215 = _mm256_broadcast_ss(r2 + 13);
__m256 _r216 = _mm256_broadcast_ss(r2 + 14);
__m256 _r217 = _mm256_broadcast_ss(r2 + 15);
_sum10 = _mm256_comp_fmadd_ps(_r210, _k60, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r211, _k61, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r212, _k62, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r213, _k63, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r214, _k64, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r215, _k65, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r216, _k66, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r217, _k67, _sum11);
__m256 _k70 = _mm256_loadu_ps(kptr);
__m256 _k71 = _mm256_loadu_ps(kptr + 8);
__m256 _k72 = _mm256_loadu_ps(kptr + 16);
__m256 _k73 = _mm256_loadu_ps(kptr + 24);
__m256 _k74 = _mm256_loadu_ps(kptr + 32);
__m256 _k75 = _mm256_loadu_ps(kptr + 40);
__m256 _k76 = _mm256_loadu_ps(kptr + 48);
__m256 _k77 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r210, _k70, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r211, _k71, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r212, _k72, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r213, _k73, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r214, _k74, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r215, _k75, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r216, _k76, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r217, _k77, _sum01);
__m256 _r220 = _mm256_broadcast_ss(r2 + 16);
__m256 _r221 = _mm256_broadcast_ss(r2 + 17);
__m256 _r222 = _mm256_broadcast_ss(r2 + 18);
__m256 _r223 = _mm256_broadcast_ss(r2 + 19);
__m256 _r224 = _mm256_broadcast_ss(r2 + 20);
__m256 _r225 = _mm256_broadcast_ss(r2 + 21);
__m256 _r226 = _mm256_broadcast_ss(r2 + 22);
__m256 _r227 = _mm256_broadcast_ss(r2 + 23);
_sum10 = _mm256_comp_fmadd_ps(_r220, _k70, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r221, _k71, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r222, _k72, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r223, _k73, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r224, _k74, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r225, _k75, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r226, _k76, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r227, _k77, _sum11);
__m256 _k80 = _mm256_loadu_ps(kptr);
__m256 _k81 = _mm256_loadu_ps(kptr + 8);
__m256 _k82 = _mm256_loadu_ps(kptr + 16);
__m256 _k83 = _mm256_loadu_ps(kptr + 24);
__m256 _k84 = _mm256_loadu_ps(kptr + 32);
__m256 _k85 = _mm256_loadu_ps(kptr + 40);
__m256 _k86 = _mm256_loadu_ps(kptr + 48);
__m256 _k87 = _mm256_loadu_ps(kptr + 56);
_sum00 = _mm256_comp_fmadd_ps(_r220, _k80, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r221, _k81, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r222, _k82, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r223, _k83, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r224, _k84, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r225, _k85, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r226, _k86, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r227, _k87, _sum01);
__m256 _r230 = _mm256_broadcast_ss(r2 + 24);
__m256 _r231 = _mm256_broadcast_ss(r2 + 25);
__m256 _r232 = _mm256_broadcast_ss(r2 + 26);
__m256 _r233 = _mm256_broadcast_ss(r2 + 27);
__m256 _r234 = _mm256_broadcast_ss(r2 + 28);
__m256 _r235 = _mm256_broadcast_ss(r2 + 29);
__m256 _r236 = _mm256_broadcast_ss(r2 + 30);
__m256 _r237 = _mm256_broadcast_ss(r2 + 31);
_sum10 = _mm256_comp_fmadd_ps(_r230, _k80, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r231, _k81, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r232, _k82, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r233, _k83, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r234, _k84, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r235, _k85, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r236, _k86, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r237, _k87, _sum11);
kptr -= 64 * 8;
_sum00 = _mm256_add_ps(_sum00, _sum01);
_sum10 = _mm256_add_ps(_sum10, _sum11);
_mm256_storeu_ps(outptr, _sum00);
_mm256_storeu_ps(outptr + 8, _sum10);
r0 += 16;
r1 += 16;
r2 += 16;
outptr += 16;
}
for (; j < outw; j++)
{
__m256 _sum0 = _mm256_loadu_ps(outptr);
__m256 _sum1 = _mm256_setzero_ps();
__m256 _r000 = _mm256_broadcast_ss(r0 + 0);
__m256 _r001 = _mm256_broadcast_ss(r0 + 1);
__m256 _r002 = _mm256_broadcast_ss(r0 + 2);
__m256 _r003 = _mm256_broadcast_ss(r0 + 3);
__m256 _r004 = _mm256_broadcast_ss(r0 + 4);
__m256 _r005 = _mm256_broadcast_ss(r0 + 5);
__m256 _r006 = _mm256_broadcast_ss(r0 + 6);
__m256 _r007 = _mm256_broadcast_ss(r0 + 7);
__m256 _k00 = _mm256_loadu_ps(kptr);
__m256 _k01 = _mm256_loadu_ps(kptr + 8);
__m256 _k02 = _mm256_loadu_ps(kptr + 16);
__m256 _k03 = _mm256_loadu_ps(kptr + 24);
__m256 _k04 = _mm256_loadu_ps(kptr + 32);
__m256 _k05 = _mm256_loadu_ps(kptr + 40);
__m256 _k06 = _mm256_loadu_ps(kptr + 48);
__m256 _k07 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r000, _k00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r001, _k01, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r002, _k02, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r003, _k03, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r004, _k04, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r005, _k05, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r006, _k06, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r007, _k07, _sum1);
__m256 _r010 = _mm256_broadcast_ss(r0 + 8);
__m256 _r011 = _mm256_broadcast_ss(r0 + 9);
__m256 _r012 = _mm256_broadcast_ss(r0 + 10);
__m256 _r013 = _mm256_broadcast_ss(r0 + 11);
__m256 _r014 = _mm256_broadcast_ss(r0 + 12);
__m256 _r015 = _mm256_broadcast_ss(r0 + 13);
__m256 _r016 = _mm256_broadcast_ss(r0 + 14);
__m256 _r017 = _mm256_broadcast_ss(r0 + 15);
__m256 _k10 = _mm256_loadu_ps(kptr);
__m256 _k11 = _mm256_loadu_ps(kptr + 8);
__m256 _k12 = _mm256_loadu_ps(kptr + 16);
__m256 _k13 = _mm256_loadu_ps(kptr + 24);
__m256 _k14 = _mm256_loadu_ps(kptr + 32);
__m256 _k15 = _mm256_loadu_ps(kptr + 40);
__m256 _k16 = _mm256_loadu_ps(kptr + 48);
__m256 _k17 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r010, _k10, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r011, _k11, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r012, _k12, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r013, _k13, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r014, _k14, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r015, _k15, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r016, _k16, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r017, _k17, _sum1);
__m256 _r020 = _mm256_broadcast_ss(r0 + 16);
__m256 _r021 = _mm256_broadcast_ss(r0 + 17);
__m256 _r022 = _mm256_broadcast_ss(r0 + 18);
__m256 _r023 = _mm256_broadcast_ss(r0 + 19);
__m256 _r024 = _mm256_broadcast_ss(r0 + 20);
__m256 _r025 = _mm256_broadcast_ss(r0 + 21);
__m256 _r026 = _mm256_broadcast_ss(r0 + 22);
__m256 _r027 = _mm256_broadcast_ss(r0 + 23);
__m256 _k20 = _mm256_loadu_ps(kptr);
__m256 _k21 = _mm256_loadu_ps(kptr + 8);
__m256 _k22 = _mm256_loadu_ps(kptr + 16);
__m256 _k23 = _mm256_loadu_ps(kptr + 24);
__m256 _k24 = _mm256_loadu_ps(kptr + 32);
__m256 _k25 = _mm256_loadu_ps(kptr + 40);
__m256 _k26 = _mm256_loadu_ps(kptr + 48);
__m256 _k27 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r020, _k20, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r021, _k21, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r022, _k22, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r023, _k23, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r024, _k24, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r025, _k25, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r026, _k26, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r027, _k27, _sum1);
__m256 _r100 = _mm256_broadcast_ss(r1 + 0);
__m256 _r101 = _mm256_broadcast_ss(r1 + 1);
__m256 _r102 = _mm256_broadcast_ss(r1 + 2);
__m256 _r103 = _mm256_broadcast_ss(r1 + 3);
__m256 _r104 = _mm256_broadcast_ss(r1 + 4);
__m256 _r105 = _mm256_broadcast_ss(r1 + 5);
__m256 _r106 = _mm256_broadcast_ss(r1 + 6);
__m256 _r107 = _mm256_broadcast_ss(r1 + 7);
__m256 _k30 = _mm256_loadu_ps(kptr);
__m256 _k31 = _mm256_loadu_ps(kptr + 8);
__m256 _k32 = _mm256_loadu_ps(kptr + 16);
__m256 _k33 = _mm256_loadu_ps(kptr + 24);
__m256 _k34 = _mm256_loadu_ps(kptr + 32);
__m256 _k35 = _mm256_loadu_ps(kptr + 40);
__m256 _k36 = _mm256_loadu_ps(kptr + 48);
__m256 _k37 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r100, _k30, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r101, _k31, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r102, _k32, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r103, _k33, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r104, _k34, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r105, _k35, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r106, _k36, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r107, _k37, _sum1);
__m256 _r110 = _mm256_broadcast_ss(r1 + 8);
__m256 _r111 = _mm256_broadcast_ss(r1 + 9);
__m256 _r112 = _mm256_broadcast_ss(r1 + 10);
__m256 _r113 = _mm256_broadcast_ss(r1 + 11);
__m256 _r114 = _mm256_broadcast_ss(r1 + 12);
__m256 _r115 = _mm256_broadcast_ss(r1 + 13);
__m256 _r116 = _mm256_broadcast_ss(r1 + 14);
__m256 _r117 = _mm256_broadcast_ss(r1 + 15);
__m256 _k40 = _mm256_loadu_ps(kptr);
__m256 _k41 = _mm256_loadu_ps(kptr + 8);
__m256 _k42 = _mm256_loadu_ps(kptr + 16);
__m256 _k43 = _mm256_loadu_ps(kptr + 24);
__m256 _k44 = _mm256_loadu_ps(kptr + 32);
__m256 _k45 = _mm256_loadu_ps(kptr + 40);
__m256 _k46 = _mm256_loadu_ps(kptr + 48);
__m256 _k47 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r110, _k40, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r111, _k41, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r112, _k42, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r113, _k43, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r114, _k44, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r115, _k45, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r116, _k46, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r117, _k47, _sum1);
__m256 _r120 = _mm256_broadcast_ss(r1 + 16);
__m256 _r121 = _mm256_broadcast_ss(r1 + 17);
__m256 _r122 = _mm256_broadcast_ss(r1 + 18);
__m256 _r123 = _mm256_broadcast_ss(r1 + 19);
__m256 _r124 = _mm256_broadcast_ss(r1 + 20);
__m256 _r125 = _mm256_broadcast_ss(r1 + 21);
__m256 _r126 = _mm256_broadcast_ss(r1 + 22);
__m256 _r127 = _mm256_broadcast_ss(r1 + 23);
__m256 _k50 = _mm256_loadu_ps(kptr);
__m256 _k51 = _mm256_loadu_ps(kptr + 8);
__m256 _k52 = _mm256_loadu_ps(kptr + 16);
__m256 _k53 = _mm256_loadu_ps(kptr + 24);
__m256 _k54 = _mm256_loadu_ps(kptr + 32);
__m256 _k55 = _mm256_loadu_ps(kptr + 40);
__m256 _k56 = _mm256_loadu_ps(kptr + 48);
__m256 _k57 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r120, _k50, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r121, _k51, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r122, _k52, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r123, _k53, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r124, _k54, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r125, _k55, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r126, _k56, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r127, _k57, _sum1);
__m256 _r200 = _mm256_broadcast_ss(r2 + 0);
__m256 _r201 = _mm256_broadcast_ss(r2 + 1);
__m256 _r202 = _mm256_broadcast_ss(r2 + 2);
__m256 _r203 = _mm256_broadcast_ss(r2 + 3);
__m256 _r204 = _mm256_broadcast_ss(r2 + 4);
__m256 _r205 = _mm256_broadcast_ss(r2 + 5);
__m256 _r206 = _mm256_broadcast_ss(r2 + 6);
__m256 _r207 = _mm256_broadcast_ss(r2 + 7);
__m256 _k60 = _mm256_loadu_ps(kptr);
__m256 _k61 = _mm256_loadu_ps(kptr + 8);
__m256 _k62 = _mm256_loadu_ps(kptr + 16);
__m256 _k63 = _mm256_loadu_ps(kptr + 24);
__m256 _k64 = _mm256_loadu_ps(kptr + 32);
__m256 _k65 = _mm256_loadu_ps(kptr + 40);
__m256 _k66 = _mm256_loadu_ps(kptr + 48);
__m256 _k67 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r200, _k60, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r201, _k61, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r202, _k62, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r203, _k63, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r204, _k64, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r205, _k65, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r206, _k66, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r207, _k67, _sum1);
__m256 _r210 = _mm256_broadcast_ss(r2 + 8);
__m256 _r211 = _mm256_broadcast_ss(r2 + 9);
__m256 _r212 = _mm256_broadcast_ss(r2 + 10);
__m256 _r213 = _mm256_broadcast_ss(r2 + 11);
__m256 _r214 = _mm256_broadcast_ss(r2 + 12);
__m256 _r215 = _mm256_broadcast_ss(r2 + 13);
__m256 _r216 = _mm256_broadcast_ss(r2 + 14);
__m256 _r217 = _mm256_broadcast_ss(r2 + 15);
__m256 _k70 = _mm256_loadu_ps(kptr);
__m256 _k71 = _mm256_loadu_ps(kptr + 8);
__m256 _k72 = _mm256_loadu_ps(kptr + 16);
__m256 _k73 = _mm256_loadu_ps(kptr + 24);
__m256 _k74 = _mm256_loadu_ps(kptr + 32);
__m256 _k75 = _mm256_loadu_ps(kptr + 40);
__m256 _k76 = _mm256_loadu_ps(kptr + 48);
__m256 _k77 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r210, _k70, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r211, _k71, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r212, _k72, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r213, _k73, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r214, _k74, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r215, _k75, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r216, _k76, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r217, _k77, _sum1);
__m256 _r220 = _mm256_broadcast_ss(r2 + 16);
__m256 _r221 = _mm256_broadcast_ss(r2 + 17);
__m256 _r222 = _mm256_broadcast_ss(r2 + 18);
__m256 _r223 = _mm256_broadcast_ss(r2 + 19);
__m256 _r224 = _mm256_broadcast_ss(r2 + 20);
__m256 _r225 = _mm256_broadcast_ss(r2 + 21);
__m256 _r226 = _mm256_broadcast_ss(r2 + 22);
__m256 _r227 = _mm256_broadcast_ss(r2 + 23);
__m256 _k80 = _mm256_loadu_ps(kptr);
__m256 _k81 = _mm256_loadu_ps(kptr + 8);
__m256 _k82 = _mm256_loadu_ps(kptr + 16);
__m256 _k83 = _mm256_loadu_ps(kptr + 24);
__m256 _k84 = _mm256_loadu_ps(kptr + 32);
__m256 _k85 = _mm256_loadu_ps(kptr + 40);
__m256 _k86 = _mm256_loadu_ps(kptr + 48);
__m256 _k87 = _mm256_loadu_ps(kptr + 56);
_sum0 = _mm256_comp_fmadd_ps(_r220, _k80, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r221, _k81, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r222, _k82, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r223, _k83, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r224, _k84, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r225, _k85, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r226, _k86, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r227, _k87, _sum1);
kptr -= 64 * 8;
_sum0 = _mm256_add_ps(_sum0, _sum1);
_mm256_storeu_ps(outptr, _sum0);
r0 += 8;
r1 += 8;
r2 += 8;
outptr += 8;
}
r0 += 16;
r1 += 16;
r2 += 16;
}
}
}
}
static void conv3x3s1_winograd63_transform_kernel_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
    // Transform each 3x3 kernel into the 8x8 winograd F(6,3) domain
    // (U = G * g * G^T, applied as two 1-D passes with the ktm table),
    // then interleave the result for 8-wide packed AVX consumption.
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix rows for F(6,3)
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* k0 = (const float*)kernel + p * inch * 9 + q * 9;
            const float* k1 = k0 + 3;
            const float* k2 = k0 + 6;

            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // horizontal pass: apply ktm to each of the three kernel rows
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // vertical pass: produce the transposed 8x8 tile
            for (int j = 0; j < 8; j++)
            {
                const float* tmpp = tmp[j];
                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 8b-8a-inch/8a-64-outch/8b
    kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)4u * 64, 64);

    for (int q = 0; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm_pack8.channel(q / 8);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 7 < inch; p += 8)
            {
                // pack an 8-inch x 8-outch cell: inch-major, outch-minor,
                // i.e. g00[i * 8 + j] = U(outch q+j, inch p+i, point k)
                for (int i = 0; i < 8; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        *g00++ = k00[k];
                    }
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) stride-1 convolution for 8-packed float data (AVX).
// Pipeline visible below: pad the input so the output is a multiple of 6,
// transform input tiles into the 64-point winograd domain, run a batched
// dot product against the pre-transformed kernels in kernel_tm, then
// transform back and crop to the requested output size.
static void conv3x3s1_winograd63_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
// one 8-packed value per tile, per winograd point (64), per input channel
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd63_transform_input_pack8_avx(bottom_blob_bordered, bottom_blob_tm, opt);
}
// release the padded copy early; only the transformed data is needed now
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute: regroup tiles into batches of 12/8/4/2/1 so the dot loop below
// can process several tiles per kernel load; row layout mirrors the
// i/12, (i%12)/8, ... indexing used when reading back.
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x12
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 16);
__m256 _r3 = _mm256_load_ps(r0 + 24);
__m256 _r4 = _mm256_load_ps(r0 + 32);
__m256 _r5 = _mm256_load_ps(r0 + 40);
__m256 _r6 = _mm256_load_ps(r0 + 48);
__m256 _r7 = _mm256_load_ps(r0 + 56);
__m256 _r8 = _mm256_load_ps(r0 + 64);
__m256 _r9 = _mm256_load_ps(r0 + 72);
__m256 _ra = _mm256_load_ps(r0 + 80);
__m256 _rb = _mm256_load_ps(r0 + 88);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
__m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
__m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
__m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
__m256 _tmp8 = _mm256_unpacklo_ps(_r8, _r9);
__m256 _tmp9 = _mm256_unpackhi_ps(_r8, _r9);
__m256 _tmpa = _mm256_unpacklo_ps(_ra, _rb);
__m256 _tmpb = _mm256_unpackhi_ps(_ra, _rb);
__m256 _tmpc = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpd = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpe = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpf = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpg = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmph = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpi = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpj = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpk = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpl = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpm = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpn = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 2, 0, 0));
_r3 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 2, 0, 0));
_r4 = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
_r5 = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 2, 0, 0));
_r6 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 3, 0, 1));
_r7 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
_r8 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 3, 0, 1));
_r9 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 3, 0, 1));
_ra = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
_rb = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
_mm256_store_ps(tmpptr + 8 * 4, _r4);
_mm256_store_ps(tmpptr + 8 * 5, _r5);
_mm256_store_ps(tmpptr + 8 * 6, _r6);
_mm256_store_ps(tmpptr + 8 * 7, _r7);
_mm256_store_ps(tmpptr + 8 * 8, _r8);
_mm256_store_ps(tmpptr + 8 * 9, _r9);
_mm256_store_ps(tmpptr + 8 * 10, _ra);
_mm256_store_ps(tmpptr + 8 * 11, _rb);
tmpptr += 96;
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(r0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(r0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(r0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(r0 + 8 * 7);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
__m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
__m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
__m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
__m256 _tmp8 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp9 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpa = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpb = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpc = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpd = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpe = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpf = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 2, 0, 0));
_r3 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
_r4 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 3, 0, 1));
_r5 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
_r6 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 3, 0, 1));
_r7 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
_mm256_store_ps(tmpptr + 8 * 4, _r4);
_mm256_store_ps(tmpptr + 8 * 5, _r5);
_mm256_store_ps(tmpptr + 8 * 6, _r6);
_mm256_store_ps(tmpptr + 8 * 7, _r7);
tmpptr += 64;
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x4
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp5 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmp6 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp7 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 3, 0, 1));
_r3 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
tmpptr += 32;
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x2
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
_r0 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
tmpptr += 16;
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// single tile: copy through without transposing
__m256 _val = _mm256_load_ps(r0);
_mm256_store_ps(tmpptr, _val);
tmpptr += 8;
r0 += bottom_blob_tm.cstep * 8;
}
}
}
// the un-permuted transform data is no longer needed
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator);
// dot product: for each output channel and each winograd point r,
// accumulate over inch*8 scalar input lanes against the 8-wide kernel rows
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
__m256 _sum8 = _mm256_setzero_ps();
__m256 _sum9 = _mm256_setzero_ps();
__m256 _suma = _mm256_setzero_ps();
__m256 _sumb = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
// one kernel vector against 12 broadcast tile scalars
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
__m256 _val4 = _mm256_broadcast_ss(r0 + 4);
__m256 _val5 = _mm256_broadcast_ss(r0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
__m256 _val6 = _mm256_broadcast_ss(r0 + 6);
__m256 _val7 = _mm256_broadcast_ss(r0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);
__m256 _val8 = _mm256_broadcast_ss(r0 + 8);
__m256 _val9 = _mm256_broadcast_ss(r0 + 9);
_sum8 = _mm256_comp_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_val9, _w0, _sum9);
__m256 _vala = _mm256_broadcast_ss(r0 + 10);
__m256 _valb = _mm256_broadcast_ss(r0 + 11);
_suma = _mm256_comp_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm256_comp_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
_mm256_store_ps(output0_tm + 8 * 4, _sum4);
_mm256_store_ps(output0_tm + 8 * 5, _sum5);
_mm256_store_ps(output0_tm + 8 * 6, _sum6);
_mm256_store_ps(output0_tm + 8 * 7, _sum7);
_mm256_store_ps(output0_tm + 8 * 8, _sum8);
_mm256_store_ps(output0_tm + 8 * 9, _sum9);
_mm256_store_ps(output0_tm + 8 * 10, _suma);
_mm256_store_ps(output0_tm + 8 * 11, _sumb);
output0_tm += 8 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
__m256 _val4 = _mm256_broadcast_ss(r0 + 4);
__m256 _val5 = _mm256_broadcast_ss(r0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
__m256 _val6 = _mm256_broadcast_ss(r0 + 6);
__m256 _val7 = _mm256_broadcast_ss(r0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
_mm256_store_ps(output0_tm + 8 * 4, _sum4);
_mm256_store_ps(output0_tm + 8 * 5, _sum5);
_mm256_store_ps(output0_tm + 8 * 6, _sum6);
_mm256_store_ps(output0_tm + 8 * 7, _sum7);
output0_tm += 8 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
output0_tm += 8 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
output0_tm += 8 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
output0_tm += 8;
}
}
}
}
// NOTE(review): bottom_blob_tm was already released inside the dot block
// above, so this second reset is a harmless no-op.
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
// output already tile-aligned: transform directly into top_blob
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd63_transform_output_pack8_avx(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // Transform each 3x3 kernel into the 6x6 winograd F(4,3) domain
    // and interleave for 8-wide packed AVX access.
    Mat kernel_tm(6 * 6, inch, outch);

    // G matrix rows for F(4,3)
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // horizontal pass: apply ktm to each of the three 3-element kernel rows
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                for (int r = 0; r < 3; r++)
                {
                    const float* kr = kernel0 + r * 3;
                    tmp[i][r] = kr[0] * ktm[i][0] + kr[1] * ktm[i][1] + kr[2] * ktm[i][2];
                }
            }

            // vertical pass: produce the 6x6 transformed tile U
            for (int j = 0; j < 6; j++)
            {
                const float* tmpp = tmp[j];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 8b-8a-inch/8a-36-outch/8b
    kernel_tm_pack4.create(inch / 8, 36, outch / 8, (size_t)4u * 64, 64);

    for (int q = 0; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 8);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + 7 < inch; p += 8)
            {
                // pack an 8-inch x 8-outch cell, inch-major / outch-minor
                for (int i = 0; i < 8; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        *g00++ = kernel_tm.channel(q + j).row(p + i)[k];
                    }
                }
            }
        }
    }
}
// Winograd F(4x4,3x3) convolution for 8-packed (AVX) float data.
// Pipeline: (1) pad input so output tiles are 4x4, (2) transform input into
// the 6x6 Winograd domain, (3) repack tiles and run the per-frequency batched
// dot products against the pre-transformed kernel, (4) transform the result
// back to the spatial domain and crop the padding.
static void conv3x3s1_winograd43_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
// output is rounded up to a multiple of 4 (tile size); input needs 2 extra
// border columns/rows for the 6-wide input tiles (stride 4, overlap 2)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 4;
int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
// 36 = 6x6 Winograd frequencies per tile
bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd43_transform_input_pack8_avx(bottom_blob_bordered, bottom_blob_tm, opt);
}
// release the padded input copy early to reduce peak memory
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
// Repack tiles into batches of 12/8/4/2/1 so the dot loop below can reuse
// each loaded kernel vector across several tiles; the row-count expression
// accounts for every remainder combination of the batch sizes.
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);
// one Winograd frequency per iteration; frequencies are independent
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x12
// load 12 tiles x 8 lanes, transpose to 8 groups of 12 scalars so the
// dot kernel can broadcast one tile scalar at a time
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(r0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(r0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(r0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(r0 + 8 * 7);
__m256 _r8 = _mm256_load_ps(r0 + 8 * 8);
__m256 _r9 = _mm256_load_ps(r0 + 8 * 9);
__m256 _ra = _mm256_load_ps(r0 + 8 * 10);
__m256 _rb = _mm256_load_ps(r0 + 8 * 11);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
__m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
__m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
__m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
__m256 _tmp8 = _mm256_unpacklo_ps(_r8, _r9);
__m256 _tmp9 = _mm256_unpackhi_ps(_r8, _r9);
__m256 _tmpa = _mm256_unpacklo_ps(_ra, _rb);
__m256 _tmpb = _mm256_unpackhi_ps(_ra, _rb);
__m256 _tmpc = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpd = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpe = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpf = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpg = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmph = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpi = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpj = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpk = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpl = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpm = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpn = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 2, 0, 0));
_r3 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 2, 0, 0));
_r4 = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
_r5 = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 2, 0, 0));
_r6 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 3, 0, 1));
_r7 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
_r8 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 3, 0, 1));
_r9 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 3, 0, 1));
_ra = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
_rb = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
_mm256_store_ps(tmpptr + 8 * 4, _r4);
_mm256_store_ps(tmpptr + 8 * 5, _r5);
_mm256_store_ps(tmpptr + 8 * 6, _r6);
_mm256_store_ps(tmpptr + 8 * 7, _r7);
_mm256_store_ps(tmpptr + 8 * 8, _r8);
_mm256_store_ps(tmpptr + 8 * 9, _r9);
_mm256_store_ps(tmpptr + 8 * 10, _ra);
_mm256_store_ps(tmpptr + 8 * 11, _rb);
// advance to the same (frequency, tile) position of the next input channel
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 96;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(r0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(r0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(r0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(r0 + 8 * 7);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
__m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
__m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
__m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
__m256 _tmp8 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp9 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpa = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpb = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpc = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpd = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmpe = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmpf = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 2, 0, 0));
_r3 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
_r4 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 3, 0, 1));
_r5 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
_r6 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 3, 0, 1));
_r7 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
_mm256_store_ps(tmpptr + 8 * 4, _r4);
_mm256_store_ps(tmpptr + 8 * 5, _r5);
_mm256_store_ps(tmpptr + 8 * 6, _r6);
_mm256_store_ps(tmpptr + 8 * 7, _r7);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 64;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x4
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _r2 = _mm256_load_ps(r0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(r0 + 8 * 3);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
__m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
__m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
__m256 _tmp4 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp5 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m256 _tmp6 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m256 _tmp7 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_r0 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 2, 0, 0));
_r2 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 3, 0, 1));
_r3 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
_mm256_store_ps(tmpptr + 8 * 2, _r2);
_mm256_store_ps(tmpptr + 8 * 3, _r3);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 32;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x2
__m256 _r0 = _mm256_load_ps(r0);
__m256 _r1 = _mm256_load_ps(r0 + 8);
__m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
_r0 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 2, 0, 0));
_r1 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 3, 0, 1));
_mm256_store_ps(tmpptr, _r0);
_mm256_store_ps(tmpptr + 8, _r1);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 16;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// single tile: plain copy, no transpose needed
__m256 _val = _mm256_load_ps(r0);
_mm256_store_ps(tmpptr, _val);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 8;
}
}
}
// raw transformed input no longer needed; the repacked copy is used below
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);
// batched dot: for each output channel and frequency, accumulate over all
// input channels; tiles are consumed in the same 12/8/4/2/1 batches as above
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
// one accumulator per tile in the batch; each _w0 load is reused 12x
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
__m256 _sum8 = _mm256_setzero_ps();
__m256 _sum9 = _mm256_setzero_ps();
__m256 _suma = _mm256_setzero_ps();
__m256 _sumb = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
__m256 _val4 = _mm256_broadcast_ss(r0 + 4);
__m256 _val5 = _mm256_broadcast_ss(r0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
__m256 _val6 = _mm256_broadcast_ss(r0 + 6);
__m256 _val7 = _mm256_broadcast_ss(r0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);
__m256 _val8 = _mm256_broadcast_ss(r0 + 8);
__m256 _val9 = _mm256_broadcast_ss(r0 + 9);
_sum8 = _mm256_comp_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_val9, _w0, _sum9);
__m256 _vala = _mm256_broadcast_ss(r0 + 10);
__m256 _valb = _mm256_broadcast_ss(r0 + 11);
_suma = _mm256_comp_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm256_comp_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
_mm256_store_ps(output0_tm + 8 * 4, _sum4);
_mm256_store_ps(output0_tm + 8 * 5, _sum5);
_mm256_store_ps(output0_tm + 8 * 6, _sum6);
_mm256_store_ps(output0_tm + 8 * 7, _sum7);
_mm256_store_ps(output0_tm + 8 * 8, _sum8);
_mm256_store_ps(output0_tm + 8 * 9, _sum9);
_mm256_store_ps(output0_tm + 8 * 10, _suma);
_mm256_store_ps(output0_tm + 8 * 11, _sumb);
output0_tm += 8 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
__m256 _val4 = _mm256_broadcast_ss(r0 + 4);
__m256 _val5 = _mm256_broadcast_ss(r0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
__m256 _val6 = _mm256_broadcast_ss(r0 + 6);
__m256 _val7 = _mm256_broadcast_ss(r0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
_mm256_store_ps(output0_tm + 8 * 4, _sum4);
_mm256_store_ps(output0_tm + 8 * 5, _sum5);
_mm256_store_ps(output0_tm + 8 * 6, _sum6);
_mm256_store_ps(output0_tm + 8 * 7, _sum7);
output0_tm += 8 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
_mm256_store_ps(output0_tm + 8 * 2, _sum2);
_mm256_store_ps(output0_tm + 8 * 3, _sum3);
output0_tm += 8 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
_mm256_store_ps(output0_tm + 8, _sum1);
output0_tm += 8 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * 8; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(k0);
__m256 _val0 = _mm256_broadcast_ss(r0);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 8;
}
_mm256_store_ps(output0_tm, _sum0);
output0_tm += 8;
}
}
}
}
// already released above after the permute; harmless no-op kept for symmetry
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
// write directly into top_blob when no crop is needed, otherwise into a
// padded intermediate that is cut down below
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd43_transform_output_pack8_avx(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
AccessibilityDisabled.h | /*
* AccessibilityDisabled.h
*
* Created on: 25.06.2014
* Author: Mmann
*/
#ifndef INTARNA_ACCESSIBILITYDISABLED_H_
#define INTARNA_ACCESSIBILITYDISABLED_H_
#include "IntaRNA/Accessibility.h"
namespace IntaRNA {
/**
* Implements the Accessibility interface but disables ED value computation,
* i.e. all ED values are set to zero.
*
* @author Martin Mann 2014
*/
class AccessibilityDisabled: public Accessibility {
public:
/**
 * Construction
 * @param sequence the sequence the accessibility data belongs to
 * @param maxLength the maximal length of accessible regions to be
 * considered. 0 defaults to the sequence's length.
 * @param accConstr optional accessibility constraint
 */
AccessibilityDisabled( const RnaSequence& sequence
, const size_t maxLength
, const AccessibilityConstraint * const accConstr
);
/**
 * destruction
 */
virtual ~AccessibilityDisabled();
/**
 * Always returns a zero accessibility energy value for regions within
 * the allowed length whose end positions are not blocked by the
 * accessibility constraint; otherwise ED_UPPER_BOUND.
 *
 * @param from the start index of the region (from <= to)
 * @param to the end index of the region (to <= seq.length())
 *
 * @return 0 if (to-from+1) <= maxLength or ED_UPPER_BOUND otherwise
 */
virtual
E_type
getED( const size_t from, const size_t to ) const;
};
///////////////////////////////////////////////////////////////////////////////
// Constructor: forwards all data to the Accessibility base class and only
// logs that no accessibility computation will be done.
inline
AccessibilityDisabled::AccessibilityDisabled(const RnaSequence& seq
, const size_t maxLength
, const AccessibilityConstraint * const accConstr)
:
Accessibility(seq, maxLength, accConstr)
{
// serialize log output when compiled with multithreading support
// NOTE(review): spelling "MULITHREADING" — confirm it matches the macro
// actually defined by the build system before changing it here
#if INTARNA_MULITHREADING
#pragma omp critical(intarna_omp_logOutput)
#endif
{ VLOG(2) <<"no accessibility requested..."; }
}
///////////////////////////////////////////////////////////////////////////////
// Destructor: nothing to release; member cleanup is handled by the base class.
inline
AccessibilityDisabled::~AccessibilityDisabled()
{
}
///////////////////////////////////////////////////////////////////////////////
// Returns the (disabled) ED penalty for the region [from,to]:
// ED_UPPER_BOUND when the region is too long or one of its ends is blocked
// by the accessibility constraint, zero otherwise (no computation is done).
inline
E_type
AccessibilityDisabled::
getED( const size_t from, const size_t to ) const
{
	// sanity check of the requested indices
	checkIndices(from,to);
	// regions exceeding the maximally allowed length get no ED value
	if ((to-from+1) > getMaxLength()) {
		return ED_UPPER_BOUND;
	}
	// a blocked end position also disables accessibility for the region
	if (!getAccConstraint().isAccessible(from) || !getAccConstraint().isAccessible(to)) {
		return ED_UPPER_BOUND;
	}
	// accessibility computation is disabled --> ED is always zero
	return (E_type)0;
}
///////////////////////////////////////////////////////////////////////////////
} // namespace
#endif /* INTARNA_ACCESSIBILITYDISABLED_H_ */
|
maxpool_with_mask.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
/*
* Highly specialized code, only works for TP3 L1
*/
#pragma once
#include "core/common/common.h"
#include "core/framework/op_kernel.h"
#include "core/framework/tensor.h"
#include "core/providers/cpu/nn/pool_base.h"
namespace onnxruntime {
namespace contrib {
// MaxPool variant that takes a second int32 mask tensor: while scanning a
// pooling window, iteration stops at the first element whose mask value is 0.
// Supports 1-D, 2-D and 3-D pooling (kernel_shape size 1/2/3).
// See file header: highly specialized, only intended for TP3 L1.
class MaxpoolWithMask : public OpKernel, public PoolBase {
 public:
  MaxpoolWithMask(const OpKernelInfo& info) : OpKernel(info), PoolBase(info) {}

  // Computes masked max pooling of input X (index 0) using mask M (index 1);
  // writes the pooled result to output 0.
  Status Compute(OpKernelContext* context) const override {
    const Tensor* X = context->Input<Tensor>(0);
    const Tensor* M = context->Input<Tensor>(1);
    const TensorShape& x_shape = X->Shape();
    const TensorShape& m_shape = M->Shape();
    ORT_RETURN_IF_NOT(x_shape.NumDimensions() >= 3, "Input dimension cannot be less than 3.");
    //TODO: fix this checker later
    //ONNXRUNTIME_RETURN_IF_NOT((x_shape[2] == m_shape[2]) && (x_shape[3] == m_shape[3]), " Input shape and mask shape mismatch: ", x_shape, " vs ", m_shape);
    std::vector<int64_t> pads = pool_attrs_.pads;
    std::vector<int64_t> kernel_shape = pool_attrs_.kernel_shape;
    std::vector<int64_t> output_dims = pool_attrs_.SetOutputSize(x_shape, x_shape[1], &pads);
    Tensor* Y = context->Output(0, TensorShape(output_dims));
    const float* X_data = X->template Data<float>();
    const int32_t* M_data = M->template Data<int32_t>();
    float* Y_data = Y->template MutableData<float>();
    // The main loop
    int64_t channels = x_shape[1];
    int64_t height = x_shape[2];
    // width/depth default to 1 for lower-rank pooling
    int64_t width = kernel_shape.size() > 1 ? x_shape[3] : 1;
    int64_t depth = kernel_shape.size() > 2 ? x_shape[4] : 1;
    int64_t pooled_height = output_dims[2];
    int64_t pooled_width = kernel_shape.size() > 1 ? output_dims[3] : 1;
    int64_t pooled_depth = kernel_shape.size() > 2 ? output_dims[4] : 1;
    switch (kernel_shape.size()) {
      case 1: {
        int64_t x_step = height;
        int64_t y_step = pooled_height;
        const int64_t total_channels = x_shape[0] * channels;
        const int64_t total_mask_channels = m_shape[0] * m_shape[1];
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int64_t c = 0; c < total_channels; ++c) {
          const float* x_d = X_data + c * x_step;
          // NOTE(review): element offset is reduced modulo a CHANNEL count
          // (m_shape[0]*m_shape[1]), mixing units — presumably this wraps the
          // mask over batches, but confirm against the actual mask layout.
          const int32_t* m_d = M_data + (c * x_step) % total_mask_channels;
          float* y_d = Y_data + c * y_step;
          for (int64_t ph = 0; ph < pooled_height; ++ph) {
            int64_t hstart = ph * stride_h() - pads[0];
            int64_t hend = std::min(hstart + kernel_shape[0], height);
            hstart = std::max(hstart, static_cast<int64_t>(0));
            // Yh stays lowest() if the window is empty or the mask stops the
            // scan at its first position
            float Yh = std::numeric_limits<float>::lowest();
            for (int64_t h = hstart; h < hend; ++h) {
              // h >= 0 always holds after the clamp above; kept as-is
              if (h >= 0 && m_d[h] == 0) break;  // if mask == 0, stop
              if (x_d[h] > Yh) {
                Yh = x_d[h];
              }
            }
            y_d[ph] = Yh;
          }
        }
        break;
      }
      case 2: {
        int64_t x_step = height * width;
        int64_t y_step = pooled_height * pooled_width;
        const int64_t total_channels = x_shape[0] * channels;
        const int64_t total_mask_channels = m_shape[0] * m_shape[1];
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int64_t c = 0; c < total_channels; ++c) {
          const float* x_d = X_data + c * x_step;
          // see case 1 note about the units of this modulo
          const int32_t* m_d = M_data + (c * x_step) % total_mask_channels;
          float* y_d = Y_data + c * y_step;
          for (int64_t ph = 0; ph < pooled_height; ++ph) {
            int64_t hstart = ph * stride_h() - pads[0];
            int64_t hend = std::min(hstart + kernel_shape[0], height);
            hstart = std::max(hstart, static_cast<int64_t>(0));
            for (int64_t pw = 0; pw < pooled_width; ++pw) {
              int64_t wstart = pw * stride_w() - pads[1];
              int64_t wend = std::min(wstart + kernel_shape[1], width);
              wstart = std::max(wstart, static_cast<int64_t>(0));
              const int64_t pool_index = ph * pooled_width + pw;
              float Yh = std::numeric_limits<float>::lowest();
              for (int64_t h = hstart; h < hend; ++h) {
                for (int64_t w = wstart; w < wend; ++w) {
                  const int64_t input_index = h * width + w;
                  // NOTE(review): "> 0" exempts element 0 from the mask test
                  // and only breaks the innermost loop — confirm intended
                  // (possibly meant ">= 0", which is trivially true)
                  if (input_index > 0 && m_d[input_index] == 0) break;  // if mask == 0, break
                  if (x_d[input_index] > Yh) {
                    Yh = x_d[input_index];
                  }
                }
              }
              y_d[pool_index] = Yh;
            }
          }
        }
        break;
      }
      case 3: {
        int64_t x_step = height * width * depth;
        int64_t y_step = pooled_height * pooled_width * pooled_depth;
        const int64_t total_channels = x_shape[0] * channels;
        const int64_t total_mask_channels = m_shape[0] * m_shape[1];
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int64_t c = 0; c < total_channels; ++c) {
          const float* x_d = X_data + c * x_step;
          // see case 1 note about the units of this modulo
          const int32_t* m_d = M_data + (c * x_step) % total_mask_channels;
          float* y_d = Y_data + c * y_step;
          for (int64_t ph = 0; ph < pooled_height; ++ph) {
            int64_t hstart = ph * stride_h() - pads[0];
            int64_t hend = std::min(hstart + kernel_shape[0], height);
            hstart = std::max(hstart, static_cast<int64_t>(0));
            for (int64_t pw = 0; pw < pooled_width; ++pw) {
              int64_t wstart = pw * stride_w() - pads[1];
              int64_t wend = std::min(wstart + kernel_shape[1], width);
              wstart = std::max(wstart, static_cast<int64_t>(0));
              for (int64_t pd = 0; pd < pooled_depth; ++pd) {
                int64_t dstart = pd * stride_d() - pads[2];
                int64_t dend = std::min(dstart + kernel_shape[2], depth);
                dstart = std::max(dstart, static_cast<int64_t>(0));
                const int64_t pool_index =
                    ph * pooled_width * pooled_depth + pw * pooled_depth + pd;
                float Yh = std::numeric_limits<float>::lowest();
                for (int64_t h = hstart; h < hend; ++h) {
                  for (int64_t w = wstart; w < wend; ++w) {
                    for (int64_t d = dstart; d < dend; ++d) {
                      const int64_t input_index = h * width * depth + w * depth + d;
                      // see case 2 note: "> 0" skips element 0; break only
                      // exits the innermost (depth) loop
                      if (input_index > 0 && m_d[input_index] == 0) break;  // if mask == 0, break
                      if (x_d[input_index] > Yh) {
                        Yh = x_d[input_index];
                      }
                    }
                  }
                }
                y_d[pool_index] = Yh;
              }
            }
          }
        }
        break;
      }
      default:
        return Status(common::ONNXRUNTIME, common::INVALID_ARGUMENT, "Unsupported pooling size : ");
    }
    return Status::OK();
  }
};
} // namespace contrib
} // namespace onnxruntime
|
sparse_msg2_setup_rap.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
/*--------------------------------------------------------------------------
* Macro to "change coordinates". This routine is written as though
* coarsening is being done in the y-direction. This macro is used to
* allow for coarsening to be done in the x-direction also.
*--------------------------------------------------------------------------*/
#define MapIndex(in_index, cdir, out_index) \
hypre_IndexD(out_index, 2) = hypre_IndexD(in_index, 2); \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1); \
cdir = (cdir + 1) % 2; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0); \
cdir = (cdir + 1) % 2;
/*--------------------------------------------------------------------------
* hypre_SparseMSG2CreateRAPOp
* Sets up new coarse grid operator structure.
*--------------------------------------------------------------------------*/
hypre_StructMatrix *
hypre_SparseMSG2CreateRAPOp( hypre_StructMatrix *R,
                             hypre_StructMatrix *A,
                             hypre_StructMatrix *P,
                             hypre_StructGrid   *coarse_grid,
                             HYPRE_Int           cdir )
{
   hypre_StructMatrix    *RAP;

   hypre_Index           *RAP_stencil_shape;
   hypre_StructStencil   *RAP_stencil;
   HYPRE_Int              RAP_stencil_size;
   HYPRE_Int              RAP_stencil_dim;
   /* one ghost layer on each of the six box faces */
   HYPRE_Int              RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};

   hypre_Index            index_temp;
   HYPRE_Int              j, i;
   HYPRE_Int              stencil_rank;

   RAP_stencil_dim = 2;

   /*-----------------------------------------------------------------------
    * Define RAP_stencil
    *-----------------------------------------------------------------------*/

   stencil_rank = 0;

   /*-----------------------------------------------------------------------
    * non-symmetric case
    *-----------------------------------------------------------------------*/

   if (!hypre_StructMatrixSymmetric(A))
   {
      /*--------------------------------------------------------------------
       * 5 or 9 point fine grid stencil produces 9 point RAP
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 9;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 2; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Storage for 9 elements (c,w,e,n,s,sw,se,nw,ne)
             *--------------------------------------------------------------*/
            /* MapIndex permutes the (i,j) offset according to the
               coarsening direction cdir; cdir is restored on exit */
            hypre_SetIndex3(index_temp,i,j,0);
            MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
            stencil_rank++;
         }
      }
   }

   /*-----------------------------------------------------------------------
    * symmetric case
    *-----------------------------------------------------------------------*/

   else
   {
      /*--------------------------------------------------------------------
       * 5 or 9 point fine grid stencil produces 9 point RAP
       * Only store the lower triangular part + diagonal = 5 entries,
       * lower triangular means the lower triangular part on the matrix
       * in the standard lexicographic ordering.
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 5;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 1; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Store 5 elements in (c,w,s,sw,se)
             *--------------------------------------------------------------*/
            if( i+j <=0 )
            {
               hypre_SetIndex3(index_temp,i,j,0);
               MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
               stencil_rank++;
            }
         }
      }
   }

   RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
                                           RAP_stencil_shape);
   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);

   /* the matrix keeps its own reference to the stencil */
   hypre_StructStencilDestroy(RAP_stencil);

   /*-----------------------------------------------------------------------
    * Coarse operator is symmetric iff fine operator is
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);

   /*-----------------------------------------------------------------------
    * Set number of ghost points - one on each boundary
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);

   return RAP;
}
/*--------------------------------------------------------------------------
* Routines to build RAP. These routines are fairly general
* 1) No assumptions about symmetry of A
* 2) No assumption that R = transpose(P)
* 3) 5 or 9-point fine grid A
*
* I am, however, assuming that the c-to-c interpolation is the identity.
*
* I've written two routines - hypre_SparseMSG2BuildRAPSym to build the
* lower triangular part of RAP (including the diagonal) and
* hypre_SparseMSG2BuildRAPNoSym to build the upper triangular part of RAP
* (excluding the diagonal). So using symmetric storage, only the
* first routine would be called. With full storage both would need to
* be called.
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SparseMSG2BuildRAPSym( hypre_StructMatrix *A,
                             hypre_StructMatrix *P,
                             hypre_StructMatrix *R,
                             HYPRE_Int           cdir,
                             hypre_Index         cindex,
                             hypre_Index         cstride,
                             hypre_Index         stridePR,
                             hypre_StructMatrix *RAP )
{
   hypre_Index           index;
   hypre_Index           index_temp;

   hypre_StructStencil  *fine_stencil;
   HYPRE_Int             fine_stencil_size;

   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;
   hypre_Index           Pstart;
   hypre_Index           loop_size;

   HYPRE_Int             fi, ci;

   hypre_Box            *A_dbox;
   hypre_Box            *P_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;

   HYPRE_Real           *pa, *pb;
   HYPRE_Real           *ra, *rb;

   HYPRE_Real           *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
   HYPRE_Real           *a_csw, *a_cse, *a_cnw;

   HYPRE_Real           *rap_cc, *rap_cw, *rap_cs;
   HYPRE_Real           *rap_csw, *rap_cse;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             yOffsetA;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;

   HYPRE_Int             ierr = 0;

   fine_stencil = hypre_StructMatrixStencil(A);
   fine_stencil_size = hypre_StructStencilSize(fine_stencil);

   stridef = cstride;
   hypre_SetIndex3(stridec, 1, 1, 1);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
   {
      /* advance fi until the fine box id matches the current coarse box id */
      while (fgrid_ids[fi] != cgrid_ids[ci])
      {
         fi++;
      }

      cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

      cstart = hypre_BoxIMin(cgrid_box);
      hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
      hypre_StructMapCoarseToFine(cstart, cindex, stridePR, Pstart);

      A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
      P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
      R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
      RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

      /*-----------------------------------------------------------------
       * Extract pointers for interpolation operator:
       * pa is pointer for weight for f-point above c-point
       * pb is pointer for weight for f-point below c-point
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

      /* pb is pre-shifted back by one y-offset so that indexing with iP
         in the BoxLoops below picks up the below-weight */
      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
         hypre_BoxOffsetDistance(P_dbox, index);

      /*-----------------------------------------------------------------
       * Extract pointers for restriction operator:
       * ra is pointer for weight for f-point above c-point
       * rb is pointer for weight for f-point below c-point
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

      /* rb pre-shifted the same way as pb above */
      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
         hypre_BoxOffsetDistance(R_dbox, index);

      /*-----------------------------------------------------------------
       * Extract pointers for 5-point fine grid operator:
       *
       * a_cc is pointer for center coefficient
       * a_cw is pointer for west coefficient
       * a_ce is pointer for east coefficient
       * a_cs is pointer for south coefficient
       * a_cn is pointer for north coefficient
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,0,0,0);
      MapIndex(index_temp, cdir, index);
      a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,-1,0,0);
      MapIndex(index_temp, cdir, index);
      a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,1,0,0);
      MapIndex(index_temp, cdir, index);
      a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      /*-----------------------------------------------------------------
       * Extract additional pointers for 9-point fine grid operator:
       *
       * a_csw is pointer for southwest coefficient
       * a_cse is pointer for southeast coefficient
       * a_cnw is pointer for northwest coefficient
       * a_cne is pointer for northeast coefficient
       *
       * NOTE: these are set only when fine_stencil_size > 5 and are
       * referenced only in the 9-point (default) branch of the switch.
       *-----------------------------------------------------------------*/
      if(fine_stencil_size > 5)
      {
         hypre_SetIndex3(index_temp,-1,-1,0);
         MapIndex(index_temp, cdir, index);
         a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex3(index_temp,1,-1,0);
         MapIndex(index_temp, cdir, index);
         a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex3(index_temp,-1,1,0);
         MapIndex(index_temp, cdir, index);
         a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
      }

      /*-----------------------------------------------------------------
       * Extract pointers for coarse grid operator - always 9-point:
       *
       * We build only the lower triangular part (plus diagonal).
       *
       * rap_cc is pointer for center coefficient (etc.)
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,0,0,0);
      MapIndex(index_temp, cdir, index);
      rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,-1,0,0);
      MapIndex(index_temp, cdir, index);
      rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,-1,-1,0);
      MapIndex(index_temp, cdir, index);
      rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,1,-1,0);
      MapIndex(index_temp, cdir, index);
      rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      /*-----------------------------------------------------------------
       * Define offsets for fine grid stencil and interpolation
       *
       * In the BoxLoop below I assume iA and iP refer to data associated
       * with the point which we are building the stencil for. The below
       * Offsets are used in referring to data associated with other points.
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
      yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

      hypre_SetIndex3(index_temp,1,0,0);
      MapIndex(index_temp, cdir, index);
      xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

      /*-----------------------------------------------------------------
       * Switch statement to direct control to appropriate BoxLoop depending
       * on stencil size. Default is full 9-point.
       *-----------------------------------------------------------------*/
      switch (fine_stencil_size)
      {
         /*--------------------------------------------------------------
          * Loop for symmetric 5-point fine grid operator; produces a
          * symmetric 9-point coarse grid operator. We calculate only the
          * lower triangular stencil entries: (southwest, south, southeast,
          * west, and center).
          *--------------------------------------------------------------*/
         case 5:

            hypre_BoxGetSize(cgrid_box, loop_size);

            hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                                P_dbox, Pstart, stridePR, iP,
                                R_dbox, Pstart, stridePR, iR,
                                A_dbox, fstart, stridef, iA,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop4For(iP, iR, iA, iAc)
            {
               /* fine-grid neighbors below/above the current point */
               iAm1 = iA - yOffsetA;
               iAp1 = iA + yOffsetA;

               iP1 = iP - yOffsetP - xOffsetP;
               rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];

               iP1 = iP - yOffsetP;
               rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
                  + rb[iR] * a_cs[iAm1]
                  + a_cs[iA] * pa[iP1];

               iP1 = iP - yOffsetP + xOffsetP;
               rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];

               iP1 = iP - xOffsetP;
               rap_cw[iAc] = a_cw[iA]
                  + rb[iR] * a_cw[iAm1] * pb[iP1]
                  + ra[iR] * a_cw[iAp1] * pa[iP1];

               rap_cc[iAc] = a_cc[iA]
                  + rb[iR] * a_cc[iAm1] * pb[iP]
                  + ra[iR] * a_cc[iAp1] * pa[iP]
                  + rb[iR] * a_cn[iAm1]
                  + ra[iR] * a_cs[iAp1]
                  + a_cs[iA] * pb[iP]
                  + a_cn[iA] * pa[iP];
            }
            hypre_BoxLoop4End(iP, iR, iA, iAc);

            break;

         /*--------------------------------------------------------------
          * Loop for symmetric 9-point fine grid operator; produces a
          * symmetric 9-point coarse grid operator. We calculate only the
          * lower triangular stencil entries: (southwest, south, southeast,
          * west, and center).
          *--------------------------------------------------------------*/
         default:

            hypre_BoxGetSize(cgrid_box, loop_size);

            hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                                P_dbox, Pstart, stridePR, iP,
                                R_dbox, Pstart, stridePR, iR,
                                A_dbox, fstart, stridef, iA,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop4For(iP, iR, iA, iAc)
            {
               iAm1 = iA - yOffsetA;
               iAp1 = iA + yOffsetA;

               iP1 = iP - yOffsetP - xOffsetP;
               rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
                  + rb[iR] * a_csw[iAm1]
                  + a_csw[iA] * pa[iP1];

               iP1 = iP - yOffsetP;
               rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
                  + rb[iR] * a_cs[iAm1]
                  + a_cs[iA] * pa[iP1];

               iP1 = iP - yOffsetP + xOffsetP;
               rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
                  + rb[iR] * a_cse[iAm1]
                  + a_cse[iA] * pa[iP1];

               iP1 = iP - xOffsetP;
               rap_cw[iAc] = a_cw[iA]
                  + rb[iR] * a_cw[iAm1] * pb[iP1]
                  + ra[iR] * a_cw[iAp1] * pa[iP1]
                  + rb[iR] * a_cnw[iAm1]
                  + ra[iR] * a_csw[iAp1]
                  + a_csw[iA] * pb[iP1]
                  + a_cnw[iA] * pa[iP1];

               rap_cc[iAc] = a_cc[iA]
                  + rb[iR] * a_cc[iAm1] * pb[iP]
                  + ra[iR] * a_cc[iAp1] * pa[iP]
                  + rb[iR] * a_cn[iAm1]
                  + ra[iR] * a_cs[iAp1]
                  + a_cs[iA] * pb[iP]
                  + a_cn[iA] * pa[iP];
            }
            hypre_BoxLoop4End(iP, iR, iA, iAc);

            break;

      } /* end switch statement */

   } /* end ForBoxI */

   return ierr;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SparseMSG2BuildRAPNoSym( hypre_StructMatrix *A,
                               hypre_StructMatrix *P,
                               hypre_StructMatrix *R,
                               HYPRE_Int           cdir,
                               hypre_Index         cindex,
                               hypre_Index         cstride,
                               hypre_Index         stridePR,
                               hypre_StructMatrix *RAP )
{
   hypre_Index           index;
   hypre_Index           index_temp;

   hypre_StructStencil  *fine_stencil;
   HYPRE_Int             fine_stencil_size;

   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;
   hypre_Index           Pstart;
   hypre_Index           loop_size;

   HYPRE_Int             fi, ci;

   hypre_Box            *A_dbox;
   hypre_Box            *P_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;

   HYPRE_Real           *pa, *pb;
   HYPRE_Real           *ra, *rb;

   HYPRE_Real           *a_cc, *a_cw, *a_ce, *a_cn;
   HYPRE_Real           *a_cse, *a_cnw, *a_cne;

   HYPRE_Real           *rap_ce, *rap_cn;
   HYPRE_Real           *rap_cnw, *rap_cne;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             yOffsetA;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;

   HYPRE_Int             ierr = 0;

   fine_stencil = hypre_StructMatrixStencil(A);
   fine_stencil_size = hypre_StructStencilSize(fine_stencil);

   stridef = cstride;
   hypre_SetIndex3(stridec, 1, 1, 1);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
   {
      /* advance fi until the fine box id matches the current coarse box id */
      while (fgrid_ids[fi] != cgrid_ids[ci])
      {
         fi++;
      }

      cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

      cstart = hypre_BoxIMin(cgrid_box);
      hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
      hypre_StructMapCoarseToFine(cstart, cindex, stridePR, Pstart);

      A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
      P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
      R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
      RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

      /*-----------------------------------------------------------------
       * Extract pointers for interpolation operator:
       * pa is pointer for weight for f-point above c-point
       * pb is pointer for weight for f-point below c-point
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

      /* pb is pre-shifted back by one y-offset so that indexing with iP
         in the BoxLoops below picks up the below-weight */
      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
         hypre_BoxOffsetDistance(P_dbox, index);

      /*-----------------------------------------------------------------
       * Extract pointers for restriction operator:
       * ra is pointer for weight for f-point above c-point
       * rb is pointer for weight for f-point below c-point
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

      /* rb pre-shifted the same way as pb above */
      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
         hypre_BoxOffsetDistance(R_dbox, index);

      /*-----------------------------------------------------------------
       * Extract pointers for 5-point fine grid operator:
       *
       * a_cc is pointer for center coefficient
       * a_cw is pointer for west coefficient
       * a_ce is pointer for east coefficient
       * a_cs is pointer for south coefficient
       * a_cn is pointer for north coefficient
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,0,0,0);
      MapIndex(index_temp, cdir, index);
      a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,-1,0,0);
      MapIndex(index_temp, cdir, index);
      a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,1,0,0);
      MapIndex(index_temp, cdir, index);
      a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      /*-----------------------------------------------------------------
       * Extract additional pointers for 9-point fine grid operator:
       *
       * a_csw is pointer for southwest coefficient
       * a_cse is pointer for southeast coefficient
       * a_cnw is pointer for northwest coefficient
       * a_cne is pointer for northeast coefficient
       *
       * NOTE: these are set only when fine_stencil_size > 5 and are
       * referenced only in the 9-point (default) branch of the switch.
       *-----------------------------------------------------------------*/
      if(fine_stencil_size > 5)
      {
         hypre_SetIndex3(index_temp,1,-1,0);
         MapIndex(index_temp, cdir, index);
         a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex3(index_temp,-1,1,0);
         MapIndex(index_temp, cdir, index);
         a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex3(index_temp,1,1,0);
         MapIndex(index_temp, cdir, index);
         a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
      }

      /*-----------------------------------------------------------------
       * Extract pointers for coarse grid operator - always 9-point:
       *
       * We build only the upper triangular part.
       *
       * rap_ce is pointer for east coefficient (etc.)
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,1,0,0);
      MapIndex(index_temp, cdir, index);
      rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,1,1,0);
      MapIndex(index_temp, cdir, index);
      rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,-1,1,0);
      MapIndex(index_temp, cdir, index);
      rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      /*-----------------------------------------------------------------
       * Define offsets for fine grid stencil and interpolation
       *
       * In the BoxLoop below I assume iA and iP refer to data associated
       * with the point which we are building the stencil for. The below
       * Offsets are used in referring to data associated with other points.
       *-----------------------------------------------------------------*/
      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
      yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

      hypre_SetIndex3(index_temp,1,0,0);
      MapIndex(index_temp, cdir, index);
      xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

      /*-----------------------------------------------------------------
       * Switch statement to direct control to appropriate BoxLoop depending
       * on stencil size. Default is full 9-point.
       *-----------------------------------------------------------------*/
      switch (fine_stencil_size)
      {
         /*--------------------------------------------------------------
          * Loop for 5-point fine grid operator; produces upper triangular
          * part of 9-point coarse grid operator - excludes diagonal.
          * stencil entries: (northeast, north, northwest, and east)
          *--------------------------------------------------------------*/
         case 5:

            hypre_BoxGetSize(cgrid_box, loop_size);

            hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                                P_dbox, Pstart, stridePR, iP,
                                R_dbox, Pstart, stridePR, iR,
                                A_dbox, fstart, stridef, iA,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop4For(iP, iR, iA, iAc)
            {
               /* fine-grid neighbors below/above the current point */
               iAm1 = iA - yOffsetA;
               iAp1 = iA + yOffsetA;

               iP1 = iP + yOffsetP + xOffsetP;
               rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];

               iP1 = iP + yOffsetP;
               rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
                  + ra[iR] * a_cn[iAp1]
                  + a_cn[iA] * pb[iP1];

               iP1 = iP + yOffsetP - xOffsetP;
               rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];

               iP1 = iP + xOffsetP;
               rap_ce[iAc] = a_ce[iA]
                  + rb[iR] * a_ce[iAm1] * pb[iP1]
                  + ra[iR] * a_ce[iAp1] * pa[iP1];
            }
            hypre_BoxLoop4End(iP, iR, iA, iAc);

            break;

         /*--------------------------------------------------------------
          * Loop for 9-point fine grid operator; produces upper triangular
          * part of 9-point coarse grid operator - excludes diagonal.
          * stencil entries: (northeast, north, northwest, and east)
          *--------------------------------------------------------------*/
         default:

            hypre_BoxGetSize(cgrid_box, loop_size);

            hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                                P_dbox, Pstart, stridePR, iP,
                                R_dbox, Pstart, stridePR, iR,
                                A_dbox, fstart, stridef, iA,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop4For(iP, iR, iA, iAc)
            {
               iAm1 = iA - yOffsetA;
               iAp1 = iA + yOffsetA;

               iP1 = iP + yOffsetP + xOffsetP;
               rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
                  + ra[iR] * a_cne[iAp1]
                  + a_cne[iA] * pb[iP1];

               iP1 = iP + yOffsetP;
               rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
                  + ra[iR] * a_cn[iAp1]
                  + a_cn[iA] * pb[iP1];

               iP1 = iP + yOffsetP - xOffsetP;
               rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
                  + ra[iR] * a_cnw[iAp1]
                  + a_cnw[iA] * pb[iP1];

               iP1 = iP + xOffsetP;
               rap_ce[iAc] = a_ce[iA]
                  + rb[iR] * a_ce[iAm1] * pb[iP1]
                  + ra[iR] * a_ce[iAp1] * pa[iP1]
                  + rb[iR] * a_cne[iAm1]
                  + ra[iR] * a_cse[iAp1]
                  + a_cse[iA] * pb[iP1]
                  + a_cne[iA] * pa[iP1];
            }
            hypre_BoxLoop4End(iP, iR, iA, iAc);

            break;

      } /* end switch statement */

   } /* end ForBoxI */

   return ierr;
}
|
finite_differences_kernel.c | {
assert( (n%omp_get_num_procs())==0 );
const long double h = 1e-6;
const int indep_x_index = i;
const int dep_thread_result_index = i;
if(verbose_mode) cerr << "TEST: indep x["<<indep_x_index<<"] and dep thread_result["<<dep_thread_result_index<<"].\n";
srand( time(NULL) );
if(verbose_mode) cerr << "Init data ... ";
#pragma omp parallel for private(i)
for(i=0;i<n;i++) {
x[i]=fabs( (sin((double)rand())+1)*( (rand()%10)+1 ) );
t1_x[i] = a1_x[i]=0.;
}
#pragma omp parallel for private(i,j)
for(i=0;i<n;i++) {
for(j=0;j<n;j++) {
A[i*n+j]=fabs( (sin((double)rand())+1)*( (rand()%10)+1 ) );
t1_A[i*n+j] = a1_A[i*n+j] = 0.;
}
}
if(verbose_mode) cerr << "done.\n";
cerr.precision(10);
for(i=0;i<omp_get_num_procs();i++) { a1_thread_result[i]=t1_thread_result[i]=thread_result[i]=0.; }
#include "test_specific_init.c"
long double x0 = x[indep_x_index];
long double x0ph = x[indep_x_index] + 0.5*h;
long double x0mh = x[indep_x_index] - 0.5*h;
if(verbose_mode) {
cerr << " x0 :" << x0 << endl;
cerr << " x0+h:" << x0ph << endl;
cerr << " x0-h:" << x0mh << endl;
}
x[indep_x_index]=x0ph;
#include "test.in.spl.withCstack"
long double fx0ph = thread_result[dep_thread_result_index];
for(i=0;i<omp_get_num_procs();i++) { a1_thread_result[i]=t1_thread_result[i]=thread_result[i]=0.; }
#include "test_specific_init.c"
x[indep_x_index]=x0mh;
#include "test.in.spl.withCstack"
long double fx0mh = thread_result[dep_thread_result_index];
if(verbose_mode) { cerr << " f(x+h): " << fx0ph << endl; cerr << " f(x-h): " << fx0mh << endl; }
// Finite differences:
long double deriv = (fx0ph - fx0mh)/h;
x[indep_x_index]=x0;
for(i=0;i<omp_get_num_procs();i++) { a1_thread_result[i]=t1_thread_result[i]=thread_result[i]=0.; }
for(int k=0;k<n;k++) t1_x[k]=0.;
for(int k=0;k<n;k++) for(int l=0;l<n;l++) t1_A[k*n+l]=0.;
// Now with tangent-linear code
t1_x[indep_x_index]=1.;
#include "test_specific_init.c"
#include "t1_test.in.spl.withCstack"
if(verbose_mode) { cerr << scientific << deriv <<" - " << t1_thread_result[dep_thread_result_index] << " = " << deriv-t1_thread_result[dep_thread_result_index]<< endl; }
assert( fabs(deriv - t1_thread_result[dep_thread_result_index]) < epsilon );
for(i=0;i<n;i++) { a1_x[i] = 0.; }
for(i=0;i<omp_get_num_procs();i++) { a1_thread_result[i]=thread_result[i]=0.; }
for(int k=0;k<n;k++) a1_x[k]=0.;
for(int k=0;k<n;k++) for(int l=0;l<n;l++) a1_A[k*n+l]=0.;
// Now with adjoint code
#pragma omp parallel
{ a1_STACKc_init(); a1_STACKi_init(); a1_STACKf_init(); }
a1_thread_result[dep_thread_result_index]=1.;
thread_result[0]=1.2345678; // Test that the correct value of the output references is restores when returning to driver routine.
#include "test_specific_init.c"
#include "a1_test.in.spl.withCstack"
if(thread_result[0]!=1.2345678) cerr << endl << thread_result[0] <<endl;
assert( thread_result[0]==1.2345678 ); // Is the value being restored correctly?
if(verbose_mode) cerr << "adjoint test successfull.\n";
#pragma omp parallel
{ a1_STACKc_deallocate(); a1_STACKi_deallocate(); a1_STACKf_deallocate(); }
#if 0
assert(thread_access_set);
for(int k=0;k<omp_get_num_procs();k++) {
for(int l=0;l<omp_get_num_procs();l++) {
if(l==k) continue;
//cerr << "compare thread " << k << " with thread " << l << endl;
for(set<void*>::const_iterator it=thread_access_set[k].begin(); it!=thread_access_set[k].end(); it++) {
for(set<void*>::const_iterator it2=thread_access_set[l].begin(); it2!=thread_access_set[l].end(); it2++) {
//cerr << "compare " << *it << " and " << *it2 << endl;
assert(*it!=*it2);
}
}
}
}
#endif
if(verbose_mode) { cerr << scientific << deriv<<" - " << a1_x[indep_x_index] << " = " << deriv-a1_x[indep_x_index]<< endl; }
assert( fabs(deriv - a1_x[indep_x_index]) < epsilon );
}
|
Example_get_nthrs.1.c | /*
* @@name: get_nthrs.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: rt-error
*/
#include <omp.h>
void work(int i);
void incorrect() {
   int np, i;
   /* Deliberately wrong (this example demonstrates the mistake): the call
      is made from the serial part of the program, where the OpenMP runtime
      reports a team of one thread, so np is 1 rather than the size of the
      parallel region below. */
   np = omp_get_num_threads(); /* misplaced */
   #pragma omp parallel for schedule(static)
   for (i=0; i < np; i++)
      work(i);
}
|
schedule-simd-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -O2" } */
/* { dg-additional-options "-mavx512f" { target { x86_64-*-* i?86-*-* } } } */
#define N 1024
int a[N], b[N], c[N];
/* Exercises parsing of schedule(simd:static) on a combined
   parallel-for-simd construct (compile-only dg test). */
void
f1 (void)
{
  int i;
#pragma omp parallel for simd schedule (simd:static)
  for (i = 0; i < N; i++)
    a[i] = b[i] + c[i];
}
/* Exercises schedule(simd: static, 7) -- simd modifier with a chunk size. */
void
f2 (void)
{
  int i;
#pragma omp parallel for simd schedule (simd: static, 7)
  for (i = 0; i < N; i++)
    a[i] = b[i] + c[i];
}
/* Exercises schedule(simd : dynamic, 7) -- whitespace around the colon. */
void
f3 (void)
{
  int i;
#pragma omp parallel for simd schedule (simd : dynamic, 7)
  for (i = 0; i < N; i++)
    a[i] = b[i] + c[i];
}
/* Exercises schedule( simd:runtime) -- leading whitespace in the clause. */
void
f4 (void)
{
  int i;
#pragma omp parallel for simd schedule ( simd:runtime)
  for (i = 0; i < N; i++)
    a[i] = b[i] + c[i];
}
/* Exercises schedule(simd:auto) -- simd modifier with the auto kind. */
void
f5 (void)
{
  int i;
#pragma omp parallel for simd schedule (simd:auto)
  for (i = 0; i < N; i++)
    a[i] = b[i] + c[i];
}
|
pmmomp.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
/* Print an n-by-n matrix to stdout: one row per line, each entry followed
   by a single space, each row terminated by a newline. */
void printMatriz (int n, int **m) {
   for (int fila = 0; fila < n; fila++) {
      int col = 0;
      while (col < n) {
         printf("%d ", m[fila][col]);
         col++;
      }
      printf("\n");
   }
}
/* Allocate an n-by-n int matrix; exits with a message on allocation failure. */
static int **alloc_matrix(unsigned n) {
   int **m = malloc(n * sizeof *m);
   if (m == NULL) {
      fprintf(stderr, "ERROR: sin memoria\n");
      exit(1);
   }
   for (unsigned i = 0; i < n; i++) {
      m[i] = malloc(n * sizeof *m[i]);
      if (m[i] == NULL) {
         fprintf(stderr, "ERROR: sin memoria\n");
         exit(1);
      }
   }
   return m;
}

/* Release a matrix created by alloc_matrix. */
static void free_matrix(unsigned n, int **m) {
   for (unsigned i = 0; i < n; i++)
      free(m[i]);
   free(m);
}

/* Wall-clock seconds. Falls back to clock() when compiled without OpenMP,
   since <omp.h> (and omp_get_wtime) is only available under _OPENMP. */
static double wall_time(void) {
#ifdef _OPENMP
   return omp_get_wtime();
#else
   return (double) clock() / CLOCKS_PER_SEC;
#endif
}

/* Driver: builds two n-by-n matrices (all 1s and all 2s), multiplies them
   into a (optionally in parallel with OpenMP) and reports either the full
   matrices (n < 15) or the elapsed time plus two checksum entries. */
int main(int argc, char const *argv[]) {
   if (argc < 2) {
      fprintf(stderr, "ERROR: falta numero de filas y columnas\n");
      exit(1);
   }
   /* Validate before narrowing: a negative or non-numeric argument would
      otherwise become a huge unsigned value. */
   long nl = strtol(argv[1], NULL, 10);
   if (nl <= 0) {
      fprintf(stderr, "ERROR: el numero de filas y columnas debe ser positivo\n");
      exit(1);
   }
   unsigned n = (unsigned) nl, i, j, k;
   int **a = alloc_matrix(n);
   int **b = alloc_matrix(n);
   int **c = alloc_matrix(n);
   // Inicializcion
   #pragma omp parallel for private(j)
   for (i=0; i<n; i++) {
      for (j=0; j<n; j++) {
         a[i][j] = 0;
         b[i][j] = /*i+1*/1;
         c[i][j] = /*j+1*/2;
      }
   }
   // Multiplicacion
   double start, end, tiempo;
   start = wall_time();
   #pragma omp parallel for private(k,j)
   for (i=0; i<n; i++)
      for (j=0; j<n; j++)
         for (k=0; k<n; k++)
            a[i][j] += b[i][k] * c[k][j];
   end = wall_time();
   tiempo = end - start;
   if (n < 15) {
      printf("M1:\n");
      printMatriz(n, b);
      printf("M2:\n");
      printMatriz(n, c);
      printf("Sol:\n");
      printMatriz(n, a);
   }
   else
      printf("Tiempo = %11.9f\t Primera = %d\t Ultima=%d\n",tiempo,a[0][0],a[n-1][n-1]);
   free_matrix(n, a);
   free_matrix(n, b);
   free_matrix(n, c);
   return 0;
}
GB_unop__identity_uint32_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_fc64)
// op(A') function: GB (_unop_tran__identity_uint32_fc64)
// C type: uint32_t
// A type: GxB_FC64_t
// cast: uint32_t cij = GB_cast_to_uint32_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = GB_cast_to_uint32_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint32_fc64)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // cast: real part of the complex value, converted to uint32_t
            GxB_FC64_t aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t (creal (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose, typecast, and apply the unary op.
// The actual transpose loop lives in the shared template
// GB_unop_transpose.c, which is driven by the GB_* macros defined above.
// Auto-generated code -- do not edit by hand.
GrB_Info GB (_unop_tran__identity_uint32_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace buffers for the template
    const int64_t *restrict A_slice,    // partition of A across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_demo.c | /* Copied from the article:
Parallel programming in C and Python
http://dl.acm.org/citation.cfm?id=2240080
*/
#include <stdio.h>
#include <omp.h>
/*
 * OpenMP demo: fork one thread per available processor.  Every thread
 * prints its id; the master thread additionally reports the team size.
 *
 * Fixes vs. original: removed the unused variable `i` and repaired the
 * malformed closing comment.  Returns 0 on success.
 */
int main (int argc, char **argv) {
    int nthreads, tid;

    /* Get the number of processors */
    printf("Number of processors available:: %d\n", omp_get_num_procs());

    /* Set the number of threads to the number of processors */
    omp_set_num_threads(omp_get_num_procs());

    /* Fork a team of threads; tid must be private so each thread
       sees its own id */
    #pragma omp parallel private(tid)
    {
        /* Obtain and print thread id */
        tid = omp_get_thread_num();
        printf("Hello World from thread = %d\n", tid);

        /* Only master thread does this (nthreads is written by the
           master only, so there is no race on it) */
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    } /* All threads join master thread and terminate */
    return 0;
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for two struct timeval values.
 *
 * NOTE: *y is used as scratch space and is modified in place (its fields
 * are normalized so the subtraction never borrows), exactly as in the
 * classic glibc-manual example this follows.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Normalize y so the microsecond difference is below one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* With y normalized, tv_usec of the result is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver for the order-4 (25-point) 3D stencil with
 * axis-symmetric variable coefficients.
 *
 * Usage: ./3d25pt_var Nx Ny Nz Nt
 *   Nx, Ny, Nz : interior grid size (a halo of 4 ghost cells is added
 *                on each side, hence the +8 below)
 *   Nt         : number of time steps
 *
 * Runs the kernel TESTS times, reporting each run's time and keeping the
 * minimum.  Fixes vs. original: (1) `min` was undeclared -- the macro
 * defined above is MIN; (2) Nx..Nt were used uninitialized when fewer
 * than 4 arguments were given; (3) the arrays were initialized only from
 * index 1 and only for time plane 0, so the stencil read uninitialized
 * halo values; (4) the top-level A/coef/tile_size allocations leaked.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;

    /* All four size arguments are required. */
    if (argc < 5) {
        printf("Usage: %s Nx Ny Nz Nt\n", argv[0]);
        return 1;
    }
    Nx = atoi(argv[1]) + 8;
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
    Nt = atoi(argv[4]);

    /* allocate the two time planes of the solution array */
    double ****A = (double ****)malloc(sizeof(double ***) * 2);
    for (m = 0; m < 2; m++) {
        A[m] = (double ***)malloc(sizeof(double **) * Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = (double **)malloc(sizeof(double *) * Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = (double *)malloc(sizeof(double) * Nx);
            }
        }
    }

    /* 13 coefficient arrays: central point + 4 per axis direction */
    double ****coef = (double ****)malloc(sizeof(double ***) * 13);
    for (m = 0; m < 13; m++) {
        coef[m] = (double ***)malloc(sizeof(double **) * Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = (double **)malloc(sizeof(double *) * Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = (double *)malloc(sizeof(double) * Nx);
            }
        }
    }

    /* tile size information, including extra element to decide the list length */
    int *tile_size = (int *)malloc(sizeof(int));
    tile_size[0] = -1;
    /* The list is modified here before source-to-source transformations */
    tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 16;
    tile_size[3] = 256;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;

    /* Initialize the FULL arrays (indices 0..N-1, both time planes) so the
       stencil's 4-deep halo reads are always defined. */
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz - 4; i++) {
                for (j = 4; j < Ny - 4; j++) {
                    for (k = 4; k < Nx - 4; k++) {
                        A[(t+1)%2][i][j][k] =
                            coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
                            coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
                            coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
                            coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
                            coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
                            coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
                            coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
                            coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
                            coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
                            coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
                            coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
                            coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
                            coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
                    }
                }
            }
        }
#pragma endscop

        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6);
        /* FIX: the macro defined in this file is MIN; `min` was undeclared */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free allocated arrays (including the top-level pointers, which the
       original leaked) */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);

    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
dynmat.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdlib.h>
#include "dynmat.h"
#define PI 3.14159265358979323846
static void get_dynmat_ij(double *dynamical_matrix,
const long num_patom,
const long num_satom,
const double *fc,
const double q[3],
const double (*svecs)[3],
const long (*multi)[2],
const double *mass,
const long *s2p_map,
const long *p2s_map,
const double (*charge_sum)[3][3],
const long i,
const long j);
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const long num_patom,
const long num_satom,
const double *fc,
const double q[3],
const double (*svecs)[3],
const long (*multi)[2],
const long *p2s_map,
const double (*charge_sum)[3][3],
const long i,
const long j,
const long k);
static double get_dielectric_part(const double q_cart[3],
const double dielectric[3][3]);
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
const double (*G_list)[3], /* [num_G, 3] */
const long num_G,
const long num_patom,
const double q_cart[3],
const double *q_direction_cart,
const double dielectric[3][3],
const double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance);
static void make_Hermitian(double *mat, const long num_band);
static void multiply_borns(double *dd,
const double *dd_in,
const long num_patom,
const double (*born)[3][3]);
/* Build the dynamical matrix D(q) for one wave vector q.
 *
 * dynamical_matrix : out, (num_patom*3) x (num_patom*3) complex matrix
 *                    stored as interleaved (real, imag) doubles.
 * fc               : force constants (see get_dm for the exact indexing).
 * q                : wave vector; enters only through the phase q . svec
 *                    computed in get_dm.
 * svecs, multi     : shortest-vector images and their (count, offset) pairs.
 * mass             : masses of the primitive-cell atoms.
 * s2p_map, p2s_map : supercell <-> primitive-cell index maps.
 * charge_sum       : optional non-analytical correction term; may be NULL.
 * with_openmp      : nonzero to parallelize over atom pairs (effective only
 *                    when compiled with PHPYOPENMP).
 *
 * Always returns 0.
 */
long dym_get_dynamical_matrix_at_q(double *dynamical_matrix,
                                   const long num_patom,
                                   const long num_satom,
                                   const double *fc,
                                   const double q[3],
                                   const double (*svecs)[3],
                                   const long (*multi)[2],
                                   const double *mass,
                                   const long *s2p_map,
                                   const long *p2s_map,
                                   const double (*charge_sum)[3][3],
                                   const long with_openmp)
{
  long i, j, ij;

  if (with_openmp)
  {
    /* Flatten the (i, j) pair loop into one index so a single
       "parallel for" balances all num_patom^2 blocks across threads. */
#ifdef PHPYOPENMP
#pragma omp parallel for
#endif
    for (ij = 0; ij < num_patom * num_patom; ij++)
    {
      get_dynmat_ij(dynamical_matrix,
                    num_patom,
                    num_satom,
                    fc,
                    q,
                    svecs,
                    multi,
                    mass,
                    s2p_map,
                    p2s_map,
                    charge_sum,
                    ij / num_patom, /* i */
                    ij % num_patom); /* j */
    }
  }
  else
  {
    /* Serial fallback: identical work, nested loops. */
    for (i = 0; i < num_patom; i++)
    {
      for (j = 0; j < num_patom; j++)
      {
        get_dynmat_ij(dynamical_matrix,
                      num_patom,
                      num_satom,
                      fc,
                      q,
                      svecs,
                      multi,
                      mass,
                      s2p_map,
                      p2s_map,
                      charge_sum,
                      i,
                      j);
      }
    }
  }

  /* Numerically enforce D(q) = D(q)^H. */
  make_Hermitian(dynamical_matrix, num_patom * 3);

  return 0;
}
/* Reciprocal-space dipole-dipole contribution at wave vector q.
 *
 * dd     : out, [natom, 3, natom, 3, (real,imag)], fully overwritten.
 * dd_q0  : in,  [natom, 3, 3, (real,imag)], the q->0 reference computed by
 *          dym_get_recip_dipole_dipole_q0; its blocks are subtracted from
 *          the diagonal (i == j) blocks of dd.
 * factor : overall scale, 4pi/V times a unit conversion.
 * lambda : damping parameter used inside get_KK.
 */
void dym_get_recip_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real,imag)] */
                                 const double *dd_q0, /* [natom, 3, 3, (real,imag)] */
                                 const double (*G_list)[3], /* [num_G, 3] */
                                 const long num_G,
                                 const long num_patom,
                                 const double q_cart[3],
                                 const double *q_direction_cart, /* must be pointer */
                                 const double (*born)[3][3],
                                 const double dielectric[3][3],
                                 const double (*pos)[3], /* [num_patom, 3] */
                                 const double factor, /* 4pi/V*unit-conv */
                                 const double lambda,
                                 const double tolerance)
{
  long i, k, l, adrs, adrs_sum;
  double *dd_tmp;

  dd_tmp = NULL;
  /* NOTE(review): the malloc result is not checked; an allocation failure
     would crash in the loop below. */
  dd_tmp = (double *)malloc(sizeof(double) * num_patom * num_patom * 18);

  /* Clear output and scratch (18 doubles = one 3x3 complex block). */
  for (i = 0; i < num_patom * num_patom * 18; i++)
  {
    dd[i] = 0;
    dd_tmp[i] = 0;
  }

  /* Sum the dielectric kernel over K = q + G into dd_tmp. */
  get_KK(dd_tmp,
         G_list,
         num_G,
         num_patom,
         q_cart,
         q_direction_cart,
         dielectric,
         pos,
         lambda,
         tolerance);

  /* Contract with the Born effective charge tensors. */
  multiply_borns(dd, dd_tmp, num_patom, born);

  /* Subtract the q->0 self-term from each diagonal (i == i) block. */
  for (i = 0; i < num_patom; i++)
  {
    for (k = 0; k < 3; k++)
    { /* alpha */
      for (l = 0; l < 3; l++)
      { /* beta */
        adrs = i * num_patom * 9 + k * num_patom * 3 + i * 3 + l;
        adrs_sum = i * 9 + k * 3 + l;
        dd[adrs * 2] -= dd_q0[adrs_sum * 2];
        dd[adrs * 2 + 1] -= dd_q0[adrs_sum * 2 + 1];
      }
    }
  }

  /* Apply the overall 4pi/V and unit-conversion factor. */
  for (i = 0; i < num_patom * num_patom * 18; i++)
  {
    dd[i] *= factor;
  }

  /* This may not be necessary. */
  /* make_Hermitian(dd, num_patom * 3); */

  free(dd_tmp);
  dd_tmp = NULL;
}
/* q -> 0 limit of the reciprocal dipole-dipole sum, reduced over the second
 * atomic index.  The result dd_q0 [natom, 3, 3, (real,imag)] is later
 * subtracted from the diagonal blocks in dym_get_recip_dipole_dipole. */
void dym_get_recip_dipole_dipole_q0(double *dd_q0, /* [natom, 3, 3, (real,imag)] */
                                    const double (*G_list)[3], /* [num_G, 3] */
                                    const long num_G,
                                    const long num_patom,
                                    const double (*born)[3][3],
                                    const double dielectric[3][3],
                                    const double (*pos)[3], /* [num_patom, 3] */
                                    const double lambda,
                                    const double tolerance)
{
  long i, j, k, l, adrs_tmp, adrs, adrsT;
  double zero_vec[3];
  double *dd_tmp1, *dd_tmp2;

  /* NOTE(review): malloc results are not checked. */
  dd_tmp1 = NULL;
  dd_tmp1 = (double *)malloc(sizeof(double) * num_patom * num_patom * 18);
  dd_tmp2 = NULL;
  dd_tmp2 = (double *)malloc(sizeof(double) * num_patom * num_patom * 18);

  for (i = 0; i < num_patom * num_patom * 18; i++)
  {
    dd_tmp1[i] = 0;
    dd_tmp2[i] = 0;
  }

  /* Evaluate the lattice sum at q = 0 (pure G sum; q_direction = NULL so
     the singular |K| ~ 0 term is skipped inside get_KK). */
  zero_vec[0] = 0;
  zero_vec[1] = 0;
  zero_vec[2] = 0;
  get_KK(dd_tmp1,
         G_list,
         num_G,
         num_patom,
         zero_vec,
         NULL,
         dielectric,
         pos,
         lambda,
         tolerance);

  /* Contract with the Born effective charges. */
  multiply_borns(dd_tmp2, dd_tmp1, num_patom, born);

  for (i = 0; i < num_patom * 18; i++)
  {
    dd_q0[i] = 0;
  }

  /* Sum over the second atomic index j: one 3x3 complex block per atom. */
  for (i = 0; i < num_patom; i++)
  {
    for (k = 0; k < 3; k++)
    { /* alpha */
      for (l = 0; l < 3; l++)
      { /* beta */
        adrs = i * 9 + k * 3 + l;
        for (j = 0; j < num_patom; j++)
        {
          adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
          dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2];
          dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1];
        }
      }
    }
  }

  /* Summation over another atomic index (kept disabled in the original) */
  /* for (j = 0; j < num_patom; j++) { */
  /*   for (k = 0; k < 3; k++) { /\* alpha *\/ */
  /*     for (l = 0; l < 3; l++) { /\* beta *\/ */
  /*       adrs = j * 9 + k * 3 + l; */
  /*       for (i = 0; i < num_patom; i++) { */
  /*         adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ; */
  /*         dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2]; */
  /*         dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1]; */
  /*       } */
  /*     } */
  /*   } */
  /* } */

  /* Hermitian-symmetrize each 3x3 block in place: average the real parts
     across (k,l)/(l,k) and antisymmetrize the imaginary parts. */
  for (i = 0; i < num_patom; i++)
  {
    for (k = 0; k < 3; k++)
    { /* alpha */
      for (l = 0; l < 3; l++)
      { /* beta */
        adrs = i * 9 + k * 3 + l;
        adrsT = i * 9 + l * 3 + k;
        dd_q0[adrs * 2] += dd_q0[adrsT * 2];
        dd_q0[adrs * 2] /= 2;
        dd_q0[adrsT * 2] = dd_q0[adrs * 2];
        dd_q0[adrs * 2 + 1] -= dd_q0[adrsT * 2 + 1];
        dd_q0[adrs * 2 + 1] /= 2;
        dd_q0[adrsT * 2 + 1] = -dd_q0[adrs * 2 + 1];
      }
    }
  }

  free(dd_tmp1);
  dd_tmp1 = NULL;
  free(dd_tmp2);
  dd_tmp2 = NULL;
}
/* Fill charge_sum[i*num_patom + j] with the outer product of the
 * q-contracted Born charge vectors, scaled by `factor`:
 *
 *   charge_sum[i,j][a][b] = factor * (q . Z_i)_a * (q . Z_j)_b
 *
 * where (q . Z_i)_a = sum_k q_cart[k] * born[i][k][a].
 */
void dym_get_charge_sum(double (*charge_sum)[3][3],
                        const long num_patom,
                        const double factor, /* 4pi/V*unit-conv and denominator */
                        const double q_cart[3],
                        const double (*born)[3][3])
{
  long i, j, k, a, b;
  double (*qZ)[3];

  qZ = (double (*)[3])malloc(sizeof(double[3]) * num_patom);

  /* qZ[i] = q contracted with the Born tensor of atom i. */
  for (i = 0; i < num_patom; i++)
  {
    for (a = 0; a < 3; a++)
    {
      qZ[i][a] = 0;
      for (k = 0; k < 3; k++)
      {
        qZ[i][a] += q_cart[k] * born[i][k][a];
      }
    }
  }

  /* Scaled outer products for every atom pair. */
  for (i = 0; i < num_patom; i++)
  {
    for (j = 0; j < num_patom; j++)
    {
      for (a = 0; a < 3; a++)
      {
        for (b = 0; b < 3; b++)
        {
          charge_sum[i * num_patom + j][a][b] =
              qZ[i][a] * qZ[j][b] * factor;
        }
      }
    }
  }

  free(qZ);
  qZ = NULL;
}
/* fc[num_patom, num_satom, 3, 3] */
/* dm[num_comm_points, num_patom * 3, num_patom *3] */
/* comm_points[num_satom / num_patom, 3] */
/* shortest_vectors[:, 3] */
/* multiplicities[num_satom, num_patom, 2] */
/*
 * Inverse transform: recover real-space force constants from dynamical
 * matrices sampled at the N = num_satom / num_patom commensurate q-points.
 * For each (primitive atom i, supercell atom j) pair, the real part of
 * dm * exp(-2*pi*i q . r) is averaged over the commensurate points and
 * re-multiplied by sqrt(m_i m_j) (the mass factor removed when the
 * dynamical matrix was built).
 */
void dym_transform_dynmat_to_fc(double *fc,
                                const double *dm,
                                const double (*comm_points)[3],
                                const double (*svecs)[3],
                                const long (*multi)[2],
                                const double *masses,
                                const long *s2pp_map,
                                const long *fc_index_map,
                                const long num_patom,
                                const long num_satom)
{
  long i, j, k, l, m, N, adrs, m_pair, i_pair, svecs_adrs;
  double coef, phase, cos_phase, sin_phase;

  N = num_satom / num_patom;  /* number of primitive cells in the supercell */

  for (i = 0; i < num_patom * num_satom * 9; i++)
  {
    fc[i] = 0;
  }

  for (i = 0; i < num_patom; i++)
  {
    for (j = 0; j < num_satom; j++)
    {
      i_pair = j * num_patom + i;
      m_pair = multi[i_pair][0];      /* # of equivalent image vectors */
      svecs_adrs = multi[i_pair][1];  /* where those vectors start in svecs */
      coef = sqrt(masses[i] * masses[s2pp_map[j]]) / N;
      for (k = 0; k < N; k++)
      {
        /* Average the phase factor exp(-2*pi*i q_k . r) over the images. */
        cos_phase = 0;
        sin_phase = 0;
        for (l = 0; l < m_pair; l++)
        {
          phase = 0;
          for (m = 0; m < 3; m++)
          {
            phase -= comm_points[k][m] * svecs[svecs_adrs + l][m];
          }
          cos_phase += cos(phase * 2 * PI);
          sin_phase += sin(phase * 2 * PI);
        }
        cos_phase /= m_pair;
        sin_phase /= m_pair;
        for (l = 0; l < 3; l++)
        {
          for (m = 0; m < 3; m++)
          {
            /* adrs: (re, im) pair of dm[k][i*3+l][s2pp_map[j]*3+m];
               accumulate the real part of dm * phase into fc. */
            adrs = k * num_patom * num_patom * 18 + i * num_patom * 18 +
                   l * num_patom * 6 + s2pp_map[j] * 6 + m * 2;
            fc[fc_index_map[i] * num_satom * 9 + j * 9 + l * 3 + m] +=
                (dm[adrs] * cos_phase - dm[adrs + 1] * sin_phase) * coef;
          }
        }
      }
    }
  }
}
/* Compute the 3x3 complex block D_ij of the dynamical matrix and store it,
 * mass-normalized, at block (i, j) of dynamical_matrix.  The block is the
 * sum over all supercell atoms k that are images of primitive atom j. */
static void get_dynmat_ij(double *dynamical_matrix,
                          const long num_patom,
                          const long num_satom,
                          const double *fc,
                          const double q[3],
                          const double (*svecs)[3],
                          const long (*multi)[2],
                          const double *mass,
                          const long *s2p_map,
                          const long *p2s_map,
                          const double (*charge_sum)[3][3],
                          const long i,
                          const long j)
{
  long k, l, adrs;
  double mass_sqrt;
  double dm_real[3][3], dm_imag[3][3];

  mass_sqrt = sqrt(mass[i] * mass[j]);

  /* Accumulators for the un-normalized block. */
  for (k = 0; k < 3; k++)
  {
    for (l = 0; l < 3; l++)
    {
      dm_real[k][l] = 0;
      dm_imag[k][l] = 0;
    }
  }

  for (k = 0; k < num_satom; k++)
  { /* Lattice points of right index of fc */
    if (s2p_map[k] != p2s_map[j])
    {
      /* skip supercell atoms that are not images of primitive atom j */
      continue;
    }
    get_dm(dm_real,
           dm_imag,
           num_patom,
           num_satom,
           fc,
           q,
           svecs,
           multi,
           p2s_map,
           charge_sum,
           i,
           j,
           k);
  }

  /* Store D_ij = (accumulated block) / sqrt(m_i m_j), interleaved re/im. */
  for (k = 0; k < 3; k++)
  {
    for (l = 0; l < 3; l++)
    {
      adrs = (i * 3 + k) * num_patom * 3 + j * 3 + l;
      dynamical_matrix[adrs * 2] = dm_real[k][l] / mass_sqrt;
      dynamical_matrix[adrs * 2 + 1] = dm_imag[k][l] / mass_sqrt;
    }
  }
}
/* Accumulate into dm_real/dm_imag the contribution of supercell atom k:
 * the force-constant 3x3 block times the phase factor exp(2*pi*i q . r),
 * with the phase averaged over the m_pair equivalent shortest vectors.
 * When charge_sum is non-NULL its (i, j) block is added to every fc
 * element (non-analytical term correction). */
static void get_dm(double dm_real[3][3],
                   double dm_imag[3][3],
                   const long num_patom,
                   const long num_satom,
                   const double *fc,
                   const double q[3],
                   const double (*svecs)[3],
                   const long (*multi)[2],
                   const long *p2s_map,
                   const double (*charge_sum)[3][3],
                   const long i,
                   const long j,
                   const long k)
{
  long l, m, i_pair, m_pair, adrs;
  double phase, cos_phase, sin_phase, fc_elem;

  cos_phase = 0;
  sin_phase = 0;

  i_pair = k * num_patom + i;
  m_pair = multi[i_pair][0];  /* number of equivalent image vectors */
  adrs = multi[i_pair][1];    /* offset of those vectors in svecs */

  /* Average the phase factor over all equivalent images. */
  for (l = 0; l < m_pair; l++)
  {
    phase = 0;
    for (m = 0; m < 3; m++)
    {
      phase += q[m] * svecs[adrs + l][m];
    }
    cos_phase += cos(phase * 2 * PI) / m_pair;
    sin_phase += sin(phase * 2 * PI) / m_pair;
  }

  for (l = 0; l < 3; l++)
  {
    for (m = 0; m < 3; m++)
    {
      if (charge_sum)
      {
        /* force constant plus non-analytical correction */
        fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] +
                   charge_sum[i * num_patom + j][l][m]);
      }
      else
      {
        fc_elem = fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m];
      }
      dm_real[l][m] += fc_elem * cos_phase;
      dm_imag[l][m] += fc_elem * sin_phase;
    }
  }
}
/* Return the quadratic form q^T . epsilon . q for the Cartesian wave
 * vector q_cart and the 3x3 dielectric tensor.  The two-step evaluation
 * (first eps_q = epsilon . q, then q . eps_q) is kept so floating-point
 * results match the previous implementation exactly. */
static double get_dielectric_part(const double q_cart[3],
                                  const double dielectric[3][3])
{
  long row, col;
  double eps_q[3];
  double quad_form;

  /* eps_q = dielectric . q_cart */
  for (row = 0; row < 3; row++)
  {
    eps_q[row] = 0;
    for (col = 0; col < 3; col++)
    {
      eps_q[row] += dielectric[row][col] * q_cart[col];
    }
  }

  /* quad_form = q_cart . eps_q */
  quad_form = 0;
  for (row = 0; row < 3; row++)
  {
    quad_form += q_cart[row] * eps_q[row];
  }
  return quad_form;
}
/* Accumulate the bare (Born-charge-free) dipole-dipole kernel into dd_part:
 * for each reciprocal vector K = q + G, the 3x3 tensor
 *   KK = K_a K_b / (K . eps . K) * exp(-(K . eps . K) / (4 lambda^2))
 * is added to every atom pair (i, j) with the structure phase factor
 * computed below.  dd_part is accumulated into, not cleared. */
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
                   const double (*G_list)[3], /* [num_G, 3] */
                   const long num_G,
                   const long num_patom,
                   const double q_cart[3],
                   const double *q_direction_cart,
                   const double dielectric[3][3],
                   const double (*pos)[3], /* [num_patom, 3] */
                   const double lambda,
                   const double tolerance)
{
  long i, j, k, l, g, adrs;
  double q_K[3];
  double norm, cos_phase, sin_phase, phase, dielectric_part, exp_damp, L2;
  double KK[3][3];

  L2 = 4 * lambda * lambda;

  /* sum over K = G + q and over G (i.e. q=0) */
  /* q_direction has values for summation over K at Gamma point. */
  /* q_direction is NULL for summation over G */
  for (g = 0; g < num_G; g++)
  {
    norm = 0;
    for (i = 0; i < 3; i++)
    {
      q_K[i] = G_list[g][i] + q_cart[i];
      norm += q_K[i] * q_K[i];
    }

    if (sqrt(norm) < tolerance)
    {
      /* |K| ~ 0: singular term.  Use the limiting direction when one is
         supplied, otherwise skip the term entirely. */
      if (!q_direction_cart)
      {
        continue;
      }
      else
      {
        dielectric_part = get_dielectric_part(q_direction_cart, dielectric);
        for (i = 0; i < 3; i++)
        {
          for (j = 0; j < 3; j++)
          {
            KK[i][j] =
                q_direction_cart[i] * q_direction_cart[j] / dielectric_part;
          }
        }
      }
    }
    else
    {
      /* Regular term with Gaussian damping exp(-(K.eps.K) / 4 lambda^2). */
      dielectric_part = get_dielectric_part(q_K, dielectric);
      exp_damp = exp(-dielectric_part / L2);
      for (i = 0; i < 3; i++)
      {
        for (j = 0; j < 3; j++)
        {
          KK[i][j] = q_K[i] * q_K[j] / dielectric_part * exp_damp;
        }
      }
    }

    /* Distribute KK to every atom pair with its phase factor. */
    for (i = 0; i < num_patom; i++)
    {
      for (j = 0; j < num_patom; j++)
      {
        phase = 0;
        for (k = 0; k < 3; k++)
        {
          /* For D-type dynamical matrix */
          /* phase += (pos[i][k] - pos[j][k]) * q_K[k]; */
          /* For C-type dynamical matrix */
          phase += (pos[i][k] - pos[j][k]) * G_list[g][k];
        }
        phase *= 2 * PI;
        cos_phase = cos(phase);
        sin_phase = sin(phase);
        for (k = 0; k < 3; k++)
        {
          for (l = 0; l < 3; l++)
          {
            adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
            dd_part[adrs * 2] += KK[k][l] * cos_phase;
            dd_part[adrs * 2 + 1] += KK[k][l] * sin_phase;
          }
        }
      }
    }
  }
}
/* Symmetrize a complex square matrix (interleaved re/im storage) in
 * place: M <- (M + M^H) / 2.  Diagonal imaginary parts become exactly
 * zero; the lower triangle is overwritten with the conjugate of the
 * averaged upper triangle. */
static void make_Hermitian(double *mat, const long num_band)
{
  long row, col, ij, ji;

  for (row = 0; row < num_band; row++)
  {
    for (col = row; col < num_band; col++)
    {
      ij = (row * num_band + col) * 2;  /* upper-triangle element */
      ji = (col * num_band + row) * 2;  /* its transpose partner  */

      /* real part: average of the pair */
      mat[ij] += mat[ji];
      mat[ij] /= 2;
      /* imaginary part: half the antisymmetric difference */
      mat[ij + 1] -= mat[ji + 1];
      mat[ij + 1] /= 2;
      /* mirror into the lower triangle as the complex conjugate */
      mat[ji] = mat[ij];
      mat[ji + 1] = -mat[ij + 1];
    }
  }
}
/* Contract the complex kernel dd_in with the Born effective charge
 * tensors of both atoms of each pair:
 *
 *   dd[i,k, j,l] += sum_{m,n} born[i][m][k] * born[j][n][l] * dd_in[i,m, j,n]
 *
 * Storage is interleaved (real, imag).  dd is accumulated into, not
 * cleared, so the caller must zero it first. */
static void multiply_borns(double *dd,
                           const double *dd_in,
                           const long num_patom,
                           const double (*born)[3][3])
{
  long i, j, k, l, m, n, out_idx, in_idx;
  double weight;

  for (i = 0; i < num_patom; i++)
  {
    for (j = 0; j < num_patom; j++)
    {
      for (k = 0; k < 3; k++)
      { /* alpha */
        for (l = 0; l < 3; l++)
        { /* beta */
          out_idx = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
          for (m = 0; m < 3; m++)
          { /* alpha' */
            for (n = 0; n < 3; n++)
            { /* beta' */
              in_idx = i * num_patom * 9 + m * num_patom * 3 + j * 3 + n;
              weight = born[i][m][k] * born[j][n][l];
              dd[out_idx * 2] += dd_in[in_idx * 2] * weight;
              dd[out_idx * 2 + 1] += dd_in[in_idx * 2 + 1] * weight;
            }
          }
        }
      }
    }
  }
}
|
GB_unaryop__lnot_int64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int64_int64
// op(A') function: GB_tran__lnot_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = ! (Ax [p] != 0) for all p: logical NOT over int64 values; the
// result is 0 or 1 stored as int64_t (see the GB_OP/GB_CASTING macros
// above).  Auto-generated code -- do not edit by hand.
GrB_Info GB_unop__lnot_int64_int64
(
    int64_t *Cx,        // output values; Cx and Ax may be aliased
    int64_t *Ax,        // input values
    int64_t anz,        // number of entries
    int nthreads        // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // expands (via the macros above) to: Cx [p] = ! (Ax [p] != 0)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): the transpose/typecast loop lives in the shared
// template GB_unaryop_transpose.c (phase 2 of 2), driven by the GB_*
// macros defined above.  Auto-generated code -- do not edit by hand.
GrB_Info GB_tran__lnot_int64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,   // workspace consumed by the template
    GBI_single_iterator Iter,          // iterator state over A
    const int64_t *GB_RESTRICT A_slice,
    int naslice                        // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ten_tusscher_2004_epi_S3_6.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_6.h"
// Report this cell model's constants to the caller.  The
// GET_CELL_MODEL_DATA macro (from the model header) expands to the
// function signature; the two flags select which fields of cell_model
// to fill in from the header constants INITIAL_V and NEQ.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;            // model's initial V
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;    // # of state variables
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Fill one cell's state vector sv[0..NEQ-1].  The macro (from the model
    // header) expands to the function signature.

    // Default initial conditions
    /*
    sv[0] = INITIAL_V; // V; millivolt
    sv[1] = 0.f; //M
    sv[2] = 0.75; //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f; //Xr1
    sv[5] = 1.f; //Xr2
    sv[6] = 0.f; //Xs
    sv[7] = 1.f; //S
    sv[8] = 0.f; //R
    sv[9] = 0.f; //D
    sv[10] = 1.f; //F
    sv[11] = 1.f; //FCa
    sv[12] = 1.f; //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f; //CaSR
    sv[15] = 11.6f; //Nai
    sv[16] = 138.3f; //Ki
    */

    // Elnaz's steady-state initial conditions; the variable order matches
    // the commented default list above (V, M, H, J, Xr1, Xr2, Xs, S, R, D,
    // F, FCa, G, Cai, CaSR, Nai, Ki)
    real sv_sst[]={-86.5110986392742,0.00130591158765005,0.778304597988111,0.778190083712180,0.000176141600174844,0.484495378655116,0.00295228963782625,0.999998329695130,1.95198204949961e-08,1.90553223501749e-05,0.999768478047086,1.00656738617877,0.999980520529342,5.74063440693430e-05,0.608088033062619,9.96205488133323,139.557924801650};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}
// Advance every requested cell by num_steps time steps of size dt.
// Cells are independent, so the outer loop is parallelized with OpenMP;
// sv_id must be private because each thread resolves its own cell index.
// The SOLVE_MODEL_ODES_CPU macro (from the model header) expands to the
// function signature.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        // cells_to_solve is an optional index map; when NULL the i-th
        // cell is solved directly
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;
        for (int j = 0; j < num_steps; ++j) {
            // each cell's state occupies NEQ consecutive entries of sv
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// Advance one cell a single step: evaluate the model right-hand side at
// the current state (RHS_cpu) and write the updated state back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);

    real state[NEQ];
    real next[NEQ];

    // snapshot the current state
    for (int eq = 0; eq < NEQ; eq++)
        state[eq] = sv[eq];

    RHS_cpu(state, next, stim_current, dt);

    // commit the updated state
    for (int eq = 0; eq < NEQ; eq++)
        sv[eq] = next[eq];
}
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.2952631571165,0.000223357550203231,0.000139823866607541,0.000468830572859158,0.267957668347321,0.123807265230240,0.209206424884521,4.97611368106475,0.0181339958455722,1.93368689237664,1099.98460468133,0.000558564959599142,0.298337407980113,0.0142073923928152,0.00109951928325625,6.37440120865430e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
openmp_teams_pso.h | //
// Created by Zayd Hammoudeh on 10/24/20.
//
#ifndef SERIAL_OPENMP_TEAMS_PSO_H
#define SERIAL_OPENMP_TEAMS_PSO_H
#include <algorithm>
#include <cassert>
#include <cfloat>
#include <cinttypes>
#include <cstring>
#include <ctime>
#include <iostream>
#include <limits>    // std::numeric_limits
#include <memory>    // shared_ptr
#include <omp.h>
#include <random>
#include <sstream>
#include <stdlib.h>
#include <vector>    // std::vector

#include "base_pso.h"
#include "cpu_config.h"
#include "types_cpu_only.h"
#include "types_general.h"
template<class S>
class OpenmpTeamsPSO : public BasePSO<S, LossFunc<S>> {
 private:
  /** Per-coordinate PRNG states (length tot_len_), allocated with new[] in seedPRNGs() */
  unsigned int * prngs_ = nullptr;
  /** Number of threads */
  const IntType n_thread_;
  /** Number of particles */
  const IntType n_part_;
  /** Dimension of the particle */
  const IntType dim_;
  /** Total length of the particle arrays (n_part_ * dim_) */
  const IntType tot_len_;
  /** Particle position vectors (length tot_len_) */
  S * parts_ = nullptr;
  /** Velocity vectors (length tot_len_) */
  S * velos_ = nullptr;
  /** Best parameter vector found by fit_(); remains null until fit_() runs */
  S * best_ = nullptr;

  /** Simple method to construct the thread-specific seeds. Iterate to improve seed diversity */
  void seedPRNGs() {
    // Initialize thread specific RNGs
    this->prngs_ = new unsigned int[this->tot_len_];
    PRNG prng(time(nullptr));
    std::uniform_int_distribution<uint64_t> dist(0, RAND_MAX - 1);
    for (IntType i = 0; i < tot_len_; i++)
      this->prngs_[i] = dist(prng);
  }

  /**
   * Core PSO fit loop.  Position/velocity updates are offloaded via OpenMP
   * "target teams"; loss evaluations run on the host with "parallel for".
   * Leaves the best vector in best_ and the best loss in best_loss_.
   */
  void fit_() {
    // Stores the best result vector. Exclude from param_ptrs as stores final result and will be
    // freed in the class destructor.
    S* best = new S[this->dim_];
    this->best_ = best;
    S* parts = this->parts_, *velos = this->velos_;
    LossFunc<S> loss = this->config_->loss_func();
    // Scratch arrays used exclusively when fitting
    S *pos_best = nullptr, *pos_best_loss = nullptr;
    IntType n_part = this->n_part_, dim = this->dim_, tot_len = this->tot_len_, best_pos = 0;
    unsigned int *prngs = this->prngs_;

    // Per-particle best positions start as the initial positions
    pos_best = new S[tot_len];
    memcpy(pos_best, parts, tot_len * sizeof(S));
    pos_best_loss = new S[this->n_part_];
    #pragma omp parallel for
    for (IntType i = 0; i < n_part; i++)
      pos_best_loss[i] = loss(dim, *(parts + i * dim), this->config()->n_ele(),
                              *this->config()->ext_data(), *this->config()->ext_labels());

    // Need to define variables in the method or it yields a runtime seg fault
    S best_loss = std::numeric_limits<S>::max();
    for (IntType i = 0; i < n_part; i++) {
      if (this->best_loss_ > pos_best_loss[i]) {
        this->best_loss_ = pos_best_loss[i];
        best_pos = i;
      }
    }
    memcpy(best, pos_best + best_pos * dim, dim * sizeof(S));
    best_loss = this->best_loss_;
    if (this->config_->d())
      this->printBest(0);

    // Use consts to simplify copy in
    const S b_lo = this->config_->bound_lo(), b_hi = this->config_->bound_hi();
    const S vd = this->config_->bound_hi() - this->config_->bound_lo();
    const S rate_global = this->config_->rate_global(), rate_point = this->config_->rate_point();
    const S momentum = this->config_->momentum();
    const S lr = this->config_->lr();

    std::vector<S*> param_ptrs = {pos_best, pos_best_loss};
    #pragma omp target data \
        map(to: pos_best[:tot_len], best[:dim], velos[:tot_len], prngs[:tot_len]), \
        map(tofrom: parts[:tot_len])
    {
      for (IntType itr = 1; itr <= this->config_->n_iter(); itr++) {
        #pragma omp target update to(pos_best[:tot_len])
        #pragma omp target teams distribute parallel for
        for (IntType idx = 0; idx < tot_len; idx++) {
          // Simple fast RNG with no libraries to prevent issues compiling to the GPU
          prngs[idx] = 1103515245 * prngs[idx] + 12345;
          S r_p = 1. * (prngs[idx] & 0x3FFFFFFF) / 0x3FFFFFFF;
          prngs[idx] = 1103515245 * prngs[idx] + 12345;
          S r_g = 1. * (prngs[idx] & 0x3FFFFFFF) / 0x3FFFFFFF;
          // Update with momentum
          velos[idx] *= momentum;
          velos[idx] += rate_point * r_p * (pos_best[idx] - parts[idx]);
          velos[idx] += rate_global * r_g * (best[idx % dim] - parts[idx]);
          parts[idx] += lr * velos[idx];
          // Clip the results
          velos[idx] = (velos[idx] > vd) ? vd : ((velos[idx] < -vd) ? -vd : velos[idx]);
          parts[idx] = (parts[idx] > b_hi) ? b_hi : ((parts[idx] < b_lo) ? b_lo : parts[idx]);
        }
        // Download the part location information
        #pragma omp target update from(parts[:tot_len])
        // Update the losses
        #pragma omp parallel for
        for (IntType i = 0; i < n_part; i++) {
          IntType offset = i * dim;
          S p_loss = loss(dim, *(parts + offset), this->config_->n_ele(),
                          *this->config_->ext_data(), *this->config_->ext_labels());
          if (pos_best_loss[i] > p_loss) {
            pos_best_loss[i] = p_loss;
            memcpy(pos_best + offset, parts + offset, dim * sizeof(S));
          }
        }
        // Update the best overall (if applicable)
        for (IntType i = 0; i < n_part; i++) {
          if (best_loss > pos_best_loss[i]) {
            best_loss = pos_best_loss[i];
            best_pos = i;
          }
        }
        memcpy(best, pos_best + best_pos * dim, dim * sizeof(S));
        #pragma omp target update to(best[:dim])
        this->best_loss_ = best_loss;
        // Update and print the best particle information
        if (this->config_->d())
          this->printBest(itr);
      }
    }
    // =========== Cleanup the memory ===========
    // Allocated with new[], so must be released with delete[] (plain delete is UB)
    for (auto ptr : param_ptrs)
      delete[] ptr;
  }

 public:
  explicit OpenmpTeamsPSO(CpuConfig<S,S> *config, IntType n_threads)
      : BasePSO<S, LossFunc<S>>(config), n_thread_(n_threads),
        n_part_(config->n_particle()), dim_(config->dim()),
        tot_len_(config->n_particle() * config->dim()) {
    assert(this->n_thread_ > 0);
    omp_set_num_threads(this->n_thread_);
    this->seedPRNGs();

    this->parts_ = new S[this->tot_len_];
    this->velos_ = new S[this->tot_len_];
    // Initialize positions uniformly in [bound_lo, bound_hi] and velocities
    // uniformly in [-v_diff, v_diff]
    S bound_lo = this->config_->bound_lo(), bound_hi = this->config_->bound_hi();
    S v_diff = bound_hi - bound_lo;
    #pragma omp parallel for collapse(2)
    for (IntType i = 0; i < this->n_part_; i++) {
      for (IntType j = 0; j < this->dim_; j++) {
        IntType idx = i * this->dim_ + j;
        this->parts_[idx] = v_diff * rand_r(prngs_ + idx) / RAND_MAX + bound_lo;
        this->velos_[idx] = (2 * v_diff) * rand_r(prngs_ + idx) / RAND_MAX - v_diff;
      }
    }
  }

  ~OpenmpTeamsPSO() {
    // All buffers were allocated with new[]; plain delete on a new[] pointer
    // is undefined behavior, so use delete[].  best_ may still be null if
    // fit_() never ran -- delete[] on null is a no-op.
    delete[] this->prngs_;
    delete[] this->parts_;
    delete[] this->velos_;
    delete[] this->best_;
  }

  /** Returns copy of the best vector; caller owns the returned new[] array */
  S* getBest() {
    S* best = new S[this->dim_];
    memcpy(best, this->best_, this->dim_ * sizeof(S));
    return best;
  }

  /** Prints the best particle found so far (tagged with iter when iter >= 0) */
  void printBest(const IntType iter = -1) const {
    assert(this->is_fit_);
    std::cout << this->name();
    if (iter >= 0)
      std::cout << " Iter " << iter << ":";
    for (IntType i = 0; i < this->config_->dim(); i++)
      std::cout << " " << this->best_[i];
    std::cout << " -- Loss: " << this->best_loss_ << std::endl;
  }

  /** Name of this OpenMP teams-offload implementation */
  std::string name() const {
    std::stringstream ss;
    ss << "OpenMP-Teams(" << this->n_thread_ << ")";
    return ss.str();
  }
};
#endif //SERIAL_OPENMP_TEAMS_PSO_H
|
Example_target_update.2.c | /*
* @@name: target_update.2c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
extern void init(float *, float *, int);
extern int maybe_init_again(float *, int);
extern void output(float *, int);
/* Element-wise product accumulated twice on the device: p = v1*v2, then
 * p += v1*v2 after the host may have refreshed either input vector. */
void vec_mult(float *p, float *v1, float *v2, int N)
{
   init(v1, v2, N);
   /* Keep v1 and v2 resident on the device across both kernels; p is copied
    * back to the host only once, when the data region ends. */
#pragma omp target data map(to: v1[:N], v2[:N]) map(from: p[0:N])
   {
      int changed;

#pragma omp target
#pragma omp parallel for
      for (int i = 0; i < N; i++)
         p[i] = v1[i] * v2[i];

      /* Re-upload an input vector only when the host actually modified it. */
      changed = maybe_init_again(v1, N);
#pragma omp target update if (changed) to(v1[:N])
      changed = maybe_init_again(v2, N);
#pragma omp target update if (changed) to(v2[:N])

#pragma omp target
#pragma omp parallel for
      for (int i = 0; i < N; i++)
         p[i] += v1[i] * v2[i];
   }
   output(p, N);
}
|
matvec.h | #ifndef __MATVEC_H__
#define __MATVEC_H__
#include <complex>
#include <algorithm>
#include "numpy/ndarraytypes.h"
#include "openmp.h"
#if defined(_OPENMP)
#include "csrmv_merge.h"
/* OpenMP build: contiguous CSR mat-vec is delegated entirely to the
 * merge-based kernel (csrmv_merge), which performs the worksharing. */
template<typename I, typename T1, typename T2>
inline void csr_matvec_contig(const bool overwrite_y, const I n,
                              const I Ap[], const I Aj[], const T1 Ax[],
                              const T1 a, const T2 x[],
                              I rco[], T2 vco[], T2 y[])
{
    csrmv_merge(overwrite_y, n, Ap, Aj, Ax, a, x, rco, vco, y);
}
/* OpenMP build: strided CSR mat-vec, delegated to the strided merge kernel. */
template<typename I, typename T1, typename T2>
inline void csr_matvec_strided(const bool overwrite_y, const I n,
                               const I Ap[], const I Aj[], const T1 Ax[],
                               const T1 a,
                               const npy_intp x_stride, const T2 x[],
                               I rco[], T2 vco[],
                               const npy_intp y_stride, T2 y[])
{
    csrmv_merge_strided(overwrite_y, n, Ap, Aj, Ax, a,
                        x_stride, x, rco, vco, y_stride, y);
}
template <typename I, typename T1, typename T2>
void dia_matvec_contig(const bool overwrite_y,
const I n_row,
const I n_col,
const I n_diags,
const I L,
const I offsets[],
const T1 diags[],
const T1 a,
const T2 x[],
T2 y[])
{
if(overwrite_y){
#pragma omp for schedule(static)
for(I n=0;n<n_row;n++){
y[n] = 0;
}
}
for(I i = 0; i < n_diags; i++){
const I k = offsets[i]; //diagonal offset
const I i_start = std::max<I>(0,-k);
const I j_start = std::max<I>(0, k);
const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L);
const I N = j_end - j_start; //number of elements to process
const T1 * diag = diags + i*L + j_start;
const T2 * x_row = x + j_start;
T2 * y_row = y + i_start;
#pragma omp for schedule(static)
for(I n=0;n<N;n++){
y_row[n] += (T2)(a * diag[n]) * x_row[n];
}
}
}
template <typename I, typename T1, typename T2>
void dia_matvec_strided(const bool overwrite_y,
const I n_row,
const I n_col,
const I n_diags,
const I L,
const I offsets[],
const T1 diags[],
const T1 a,
const npy_intp x_stride,
const T2 x[],
const npy_intp y_stride,
T2 y[])
{
if(overwrite_y){
#pragma omp for schedule(static)
for(I n=0;n<n_row;n++){
y[n * y_stride] = 0;
}
}
for(I i = 0; i < n_diags; i++){
const I k = offsets[i]; //diagonal offset
const I i_start = std::max<I>(0,-k);
const I j_start = std::max<I>(0, k);
const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L);
const I N = j_end - j_start; //number of elements to process
const T1 * diag = diags + i*L + j_start;
const T2 * x_row = x + j_start * x_stride;
T2 * y_row = y + i_start * y_stride;
#pragma omp for schedule(static)
for(I n=0;n<N;n++){
y_row[n * y_stride] += (T2)(a * diag[n]) * x_row[n * x_stride];
}
}
}
/* OpenMP build: CSC mat-vec for contiguous x/y.  Columns are distributed
 * dynamically across the team; scattered row updates go through atomic_add
 * since different columns can hit the same entry of y.  Uses "omp for", so
 * call from within an omp parallel region.
 * NOTE(review): chunk is sized from n_row although the dynamic loop runs
 * over n_col -- looks deliberate but worth confirming. */
template<typename I, typename T1, typename T2>
void csc_matvec_contig(const bool overwrite_y,
                       const I n_row,
                       const I n_col,
                       const I Ap[],
                       const I Ai[],
                       const T1 Ax[],
                       const T1 a,
                       const T2 x[],
                       T2 y[])
{
    const int nthread = omp_get_num_threads();
    const I chunk = std::max((I)1, n_row / (100 * nthread));

    if(overwrite_y){
        #pragma omp for schedule(static)
        for(I row = 0; row < n_row; row++){
            y[row] = 0;
        }
    }

    #pragma omp for schedule(dynamic,chunk)
    for(I col = 0; col < n_col; col++){
        for(I ii = Ap[col]; ii < Ap[col+1]; ii++){
            const T2 contrib = (T2)(a * Ax[ii]) * x[col];
            atomic_add(y[Ai[ii]], contrib);
        }
    }
}
/* OpenMP build: CSC mat-vec for strided x/y.  Same structure as the
 * contiguous variant, with explicit element strides applied to x and y.
 * Uses "omp for", so call from within an omp parallel region. */
template<typename I, typename T1, typename T2>
void csc_matvec_strided(const bool overwrite_y,
                        const I n_row,
                        const I n_col,
                        const I Ap[],
                        const I Ai[],
                        const T1 Ax[],
                        const T1 a,
                        const npy_intp x_stride,
                        const T2 x[],
                        const npy_intp y_stride,
                        T2 y[])
{
    const int nthread = omp_get_num_threads();
    const I chunk = std::max((I)1, n_row / (100 * nthread));

    if(overwrite_y){
        #pragma omp for schedule(static)
        for(I row = 0; row < n_row; row++){
            y[row * y_stride] = 0;
        }
    }

    #pragma omp for schedule(dynamic,chunk)
    for(I col = 0; col < n_col; col++){
        for(I ii = Ap[col]; ii < Ap[col+1]; ii++){
            const T2 contrib = (T2)(a * Ax[ii]) * x[col * x_stride];
            atomic_add(y[Ai[ii] * y_stride], contrib);
        }
    }
}
#else
/* Serial fallback: contiguous CSR mat-vec.  For each row, accumulate the
 * sparse dot product, then either store (overwrite) or add into y.
 * rco/vco are scratch arrays used only by the OpenMP merge kernel and are
 * ignored here. */
template<typename I, typename T1, typename T2>
void csr_matvec_contig(const bool overwrite_y,
                       const I n,
                       const I Ap[],
                       const I Aj[],
                       const T1 Ax[],
                       const T1 a,
                       const T2 x[],
                       I rco[],
                       T2 vco[],
                       T2 y[])
{
    const T2 scale = a;
    for(I row = 0; row < n; row++){
        T2 acc = 0;
        for(I jj = Ap[row]; jj < Ap[row+1]; jj++){
            acc += (T2)Ax[jj] * x[Aj[jj]];
        }
        if(overwrite_y)
            y[row] = scale * acc;
        else
            y[row] += scale * acc;
    }
}
/* Serial fallback: strided CSR mat-vec.  Identical to the contiguous form
 * except that x and y are accessed with explicit element strides. */
template<typename I, typename T1, typename T2>
void csr_matvec_strided(const bool overwrite_y,
                        const I n,
                        const I Ap[],
                        const I Aj[],
                        const T1 Ax[],
                        const T1 a,
                        const npy_intp x_stride,
                        const T2 x[],
                        I rco[],
                        T2 vco[],
                        const npy_intp y_stride,
                        T2 y[])
{
    const T2 scale = a;
    for(I row = 0; row < n; row++){
        T2 acc = 0;
        for(I jj = Ap[row]; jj < Ap[row+1]; jj++){
            acc += (T2)Ax[jj] * x[Aj[jj] * x_stride];
        }
        if(overwrite_y)
            y[row * y_stride] = scale * acc;
        else
            y[row * y_stride] += scale * acc;
    }
}
template <typename I, typename T1, typename T2>
void dia_matvec_contig(const bool overwrite_y,
const I n_row,
const I n_col,
const I n_diags,
const I L,
const I offsets[],
const T1 diags[],
const T1 a,
const T2 x[],
T2 y[])
{
if(overwrite_y){
for(I i = 0; i < n_row; i++){
y[i] = 0;
}
}
for(I i = 0; i < n_diags; i++){
const I k = offsets[i]; //diagonal offset
const I i_start = std::max<I>(0,-k);
const I j_start = std::max<I>(0, k);
const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L);
const I N = j_end - j_start; //number of elements to process
const T1 * diag = diags + (npy_intp)i*L + j_start;
const T2 * x_row = x + j_start;
T2 * y_row = y + i_start;
for(I n = 0; n < N; n++){
y_row[n] += (T2)(a * diag[n]) * x_row[n];
}
}
}
template <typename I, typename T1, typename T2>
void dia_matvec_strided(const bool overwrite_y,
const I n_row,
const I n_col,
const I n_diags,
const I L,
const I offsets[],
const T1 diags[],
const T1 a,
const npy_intp x_stride,
const T2 x[],
const npy_intp y_stride,
T2 y[])
{
if(overwrite_y){
for(I i = 0; i < n_row; i++){
y[i] = 0;
}
}
for(I i = 0; i < n_diags; i++){
const I k = offsets[i]; //diagonal offset
const I i_start = std::max<I>(0,-k);
const I j_start = std::max<I>(0, k);
const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L);
const I N = j_end - j_start; //number of elements to process
const T1 * diag = diags + (npy_intp)i*L + j_start;
const T2 * x_row = x + j_start * x_stride;
T2 * y_row = y + i_start * y_stride;
for(I n = 0; n < N; n++){
y_row[n * y_stride] += (T2)(a * diag[n]) * x_row[n * x_stride];
}
}
}
/* Serial fallback: CSC mat-vec for contiguous x and y.  Walks each column
 * and scatters its contributions into the rows of y. */
template<typename I, typename T1, typename T2>
void csc_matvec_contig(const bool overwrite_y,
                       const I n_row,
                       const I n_col,
                       const I Ap[],
                       const I Ai[],
                       const T1 Ax[],
                       const T1 a,
                       const T2 x[],
                       T2 y[])
{
    if(overwrite_y){
        for(I row = 0; row < n_row; row++)
            y[row] = 0;
    }
    for(I col = 0; col < n_col; col++){
        for(I ii = Ap[col]; ii < Ap[col+1]; ii++){
            y[Ai[ii]] += (T2)(a * Ax[ii]) * x[col];
        }
    }
}
/* Serial fallback: CSC mat-vec for strided x and y.  Same column scatter as
 * the contiguous variant, with explicit element strides. */
template<typename I, typename T1, typename T2>
void csc_matvec_strided(const bool overwrite_y,
                        const I n_row,
                        const I n_col,
                        const I Ap[],
                        const I Ai[],
                        const T1 Ax[],
                        const T1 a,
                        const npy_intp x_stride,
                        const T2 x[],
                        const npy_intp y_stride,
                        T2 y[])
{
    if(overwrite_y){
        for(I row = 0; row < n_row; row++)
            y[row] = 0;
    }
    for(I col = 0; col < n_col; col++){
        for(I ii = Ap[col]; ii < Ap[col+1]; ii++){
            y[Ai[ii] * y_stride] += (T2)(a * Ax[ii]) * x[col * x_stride];
        }
    }
}
#endif
/* Public CSR entry point: routes to the contiguous kernel when both strides
 * are unit, otherwise to the strided kernel. */
template<typename I, typename T1, typename T2>
void csr_matvec(const bool overwrite_y,
                const I n,
                const I Ap[],
                const I Aj[],
                const T1 Ax[],
                const T1 a,
                const npy_intp x_stride,
                const T2 x[],
                I rco[],
                T2 vco[],
                const npy_intp y_stride,
                T2 y[])
{
    const bool contiguous = (x_stride == 1) && (y_stride == 1);
    if(contiguous)
        csr_matvec_contig(overwrite_y, n, Ap, Aj, Ax, a, x, rco, vco, y);
    else
        csr_matvec_strided(overwrite_y, n, Ap, Aj, Ax, a,
                           x_stride, x, rco, vco, y_stride, y);
}
/* Public DIA entry point: routes to the contiguous kernel when both strides
 * are unit, otherwise to the strided kernel. */
template <typename I, typename T1, typename T2>
void dia_matvec(const bool overwrite_y,
                const I n_row,
                const I n_col,
                const I n_diags,
                const I L,
                const I offsets[],
                const T1 diags[],
                const T1 a,
                const npy_intp x_stride,
                const T2 x[],
                const npy_intp y_stride,
                T2 y[])
{
    const bool contiguous = (x_stride == 1) && (y_stride == 1);
    if(contiguous)
        dia_matvec_contig(overwrite_y, n_row, n_col, n_diags, L,
                          offsets, diags, a, x, y);
    else
        dia_matvec_strided(overwrite_y, n_row, n_col, n_diags, L,
                           offsets, diags, a, x_stride, x, y_stride, y);
}
/* Public CSC entry point: routes to the contiguous kernel when both strides
 * are unit, otherwise to the strided kernel. */
template<typename I, typename T1, typename T2>
void csc_matvec(const bool overwrite_y,
                const I n_row,
                const I n_col,
                const I Ap[],
                const I Ai[],
                const T1 Ax[],
                const T1 a,
                const npy_intp x_stride,
                const T2 x[],
                const npy_intp y_stride,
                T2 y[])
{
    const bool contiguous = (x_stride == 1) && (y_stride == 1);
    if(contiguous)
        csc_matvec_contig(overwrite_y, n_row, n_col, Ap, Ai, Ax, a, x, y);
    else
        csc_matvec_strided(overwrite_y, n_row, n_col, Ap, Ai, Ax, a,
                           x_stride, x, y_stride, y);
}
#endif |
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
/*
  Resize filter state: the filter/window function pointers plus the
  pre-computed support, scaling, and cubic coefficients they read.
*/
struct _ResizeFilter
{
  double
    (*filter)(const double,const ResizeFilter *),
    (*window)(const double,const ResizeFilter *),
    support,         /* filter region of support - the filter support limit */
    window_support,  /* window support, usually equal to support (expert only) */
    scale,           /* dimension scaling to fit window support (usually 1.0) */
    blur,            /* x-scale (blur-sharpen) */
    coefficient[7];  /* cubic coefficients for BC-cubic filters */

  ResizeWeightingFunctionType
    filterWeightingType,
    windowWeightingType;

  size_t
    signature;  /* NOTE(review): presumably the MagickCore validity marker -- confirm */
};
/*
Forward declarations.
*/
static double
I0(double x),
BesselOrderOne(double),
Sinc(const double, const ResizeFilter *),
SincFast(const double, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static MagickRealType *FilterName(const double x,const double support)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static double Blackman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman window: 0.42 + 0.5*cos(pi x) + 0.08*cos(2 pi x), folded down to
    a single cosine evaluation (Racette & Robidoux): with c = cos(pi x) the
    sum equals 0.34 + c*(0.5 + 0.16*c).
  */
  const double c = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+c*(0.5+c*0.16));
}
static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman window: (1-x)*cos(pi x) + sin(pi x)/pi.  Since the support is 1.0
    we know sin(pi x) >= 0, so the sine is recovered from the cosine with a
    sqrt -- one trig call, one sqrt, 7 flops (Robidoux).
  */
  const double c = cos((double) (MagickPI*x));
  const double s = sqrt(1.0-c*c);
  magick_unreferenced(resize_filter);
  return((1.0-x)*c+(1.0/MagickPI)*s);
}
static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    Box filter: constant weight everywhere.  Deliberately NOT clipped to the
    support: point sampling asks for weights beyond the nominal 0.0 support,
    and clipping here would break it.
  */
  return(1.0);
}
static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Cosine window: cos((pi/2)*x).
  */
  return(cos((double) (MagickPI2*x)));
}
static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficients are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
  */
  /* Horner evaluation of whichever cubic piece covers this x. */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}
/*
  CubicSpline: direct 2-, 3-, or 4-lobe piecewise-cubic spline filters,
  selected by the configured filter support.
*/
static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter.
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}
static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian filter: exp(-x^2/(2*sigma^2)) with sigma = 1/2 unless the user
    overrides it.  Only coefficient[1] = 1/(2*sigma^2) is actually needed
    here; coefficient[0] (sigma itself) and coefficient[2] (the 1D
    normalization factor 1/(sqrt(2*PI)*sigma^2)) are informational only.
    Keeping sigma separate from the blur/support settings allows special
    'small sigma' Gaussians without the support shrinking so far that the
    filter misses pixels.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}
static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann window: 0.5 + 0.5*cos(pi x).
  */
  const double c = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*c);
}
static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming window (offset cosine): 0.54 + 0.46*cos(pi x).
  */
  const double c = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*c);
}
static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Jinc function: J1(pi x)/x, with the limiting value pi/2 at x == 0.
    Historically mislabelled "Bessel" in Paul Heckbert's "zoom" program.
    See Pratt, "Digital Image Processing", p.97, and
    http://mathworld.wolfram.com/JincFunction.html
  */
  if (x == 0.0)
    return(0.5*MagickPI);
  return(BesselOrderOne(MagickPI*x)/x);
}
static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser window (Bessel windowing): I0(beta*sqrt(1-x^2))/I0(0).  Beta
    (coefficient[0]) is a free value, typically 5..8 (default 6.5), often
    expressed as Alpha*PI.  The normalization factor (coefficient[1]) is not
    strictly required, but without it the window takes a large value at x=0,
    making comparison with other windowing functions difficult.
  */
  return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
    sqrt((double) (1.0-x*x))));
}
static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    weight;

  ssize_t
    i,
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc.  The 'order' of the fit is set
    by the support window (support 2 => lagrange-4, a piecewise cubic), and
    'n' selects which piece of the piecewise polynomial covers this x.
    See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
    Vol 18, No 11, November 1999, p1049-1075 -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);
  weight=1.0f;
  for (i=0; i < order; i++)
    {
      if (i == n)
        continue;
      weight*=(n-i-x)/(n-i);
    }
  return(weight);
}
static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of a Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}
static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Normalized sinc via a direct trig call: sinc(x) == sin(pi x)/(pi x),
    with sinc(0) == 1.
  */
  if (x != 0.0)
    {
      const double pix=(double) (MagickPI*x);
      return(sin((double) pix)/pix);
    }
  return((double) 1.0);
}
static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig formula.
    NOTE(review): the x > 4.0 test assumes callers pass x >= 0 (filter
    weights are evaluated on |x|) -- confirm against the call sites.
  */
  if (x > 4.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
      Polynomial numerator coefficients (Horner evaluation below); the factor
      (xx-1)(xx-4)(xx-9)(xx-16) pins the zeros of sinc at x = 1,2,3,4.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
      Higher-degree polynomial for the deeper quantum depth.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
      Rational approximation: numerator p over denominator q.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}
static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    Linear (tent) filter: 1st order B-Spline, bilinear interpolation kernel,
    or a Bartlett 2D Cone filter.  Also used as a Bartlett windowing function
    for Sinc().
  */
  return(x < 1.0 ? 1.0-x : 0.0);
}
static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    Welch parabolic windowing filter: 1-x^2 inside the unit interval,
    zero outside.
  */
  return(x < 1.0 ? 1.0-x*x : 0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
% The special 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for image distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RobidouxSharp is a slightly sharper version of Robidoux, some believe it
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly Mitchell falls roughly between Robidoux and
% RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisable. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
% causes the windowing (or self-windowing Lagrange filter) to act as if
% the support window is much larger than what is actually supplied to
% to the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
% If only one of these is given, it is assumed to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 8 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterType filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterType filter,const MagickBooleanType cylindrical,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterType
    filter_type,
    window_type;

  double
    B,
    C,
    value;

  ResizeFilter
    *resize_filter;

  /*
    Table mapping a given Filter into its Weighting and Windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.
    A 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
    "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
    specifically requested by the user.

    WARNING: The order of this table must match the order of the FilterType
    enumeration specified in "resample.h", or the filter names will not match
    the filter being setup.

    You can check filter setups with the "filter:verbose" expert setting.
  */
  static struct
  {
    FilterType
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter,     BoxFilter      },  /* Undefined (default to Box)  */
    { PointFilter,         BoxFilter      },  /* SPECIAL: Nearest neighbour  */
    { BoxFilter,           BoxFilter      },  /* Box averaging filter        */
    { TriangleFilter,      BoxFilter      },  /* Linear interpolation filter */
    { HermiteFilter,       BoxFilter      },  /* Hermite interpolation filter*/
    { SincFastFilter,      HannFilter     },  /* Hann -- cosine-sinc         */
    { SincFastFilter,      HammingFilter  },  /* Hamming --      '' variation*/
    { SincFastFilter,      BlackmanFilter },  /* Blackman -- 2*cosine-sinc   */
    { GaussianFilter,      BoxFilter      },  /* Gaussian blur filter        */
    { QuadraticFilter,     BoxFilter      },  /* Quadratic Gaussian approx   */
    { CubicFilter,         BoxFilter      },  /* General Cubic Filter, Spline*/
    { CatromFilter,        BoxFilter      },  /* Cubic-Keys interpolator     */
    { MitchellFilter,      BoxFilter      },  /* 'Ideal' Cubic-Keys filter   */
    { JincFilter,          BoxFilter      },  /* Raw 3-lobed Jinc function   */
    { SincFilter,          BoxFilter      },  /* Raw 4-lobed Sinc function   */
    { SincFastFilter,      BoxFilter      },  /* Raw fast sinc ("Pade"-type) */
    { SincFastFilter,      KaiserFilter   },  /* Kaiser -- square root-sinc  */
    { LanczosFilter,       WelchFilter    },  /* Welch -- parabolic (3 lobe) */
    { SincFastFilter,      CubicFilter    },  /* Parzen -- cubic-sinc        */
    { SincFastFilter,      BohmanFilter   },  /* Bohman -- 2*cosine-sinc     */
    { SincFastFilter,      TriangleFilter },  /* Bartlett -- triangle-sinc   */
    { LagrangeFilter,      BoxFilter      },  /* Lagrange self-windowing     */
    { LanczosFilter,       LanczosFilter  },  /* Lanczos Sinc-Sinc filters   */
    { LanczosSharpFilter,  LanczosSharpFilter }, /* | these require          */
    { Lanczos2Filter,      Lanczos2Filter },     /* | special handling       */
    { Lanczos2SharpFilter, Lanczos2SharpFilter },
    { RobidouxFilter,      BoxFilter      },  /* Cubic Keys tuned for EWA    */
    { RobidouxSharpFilter, BoxFilter      },  /* Sharper Cubic Keys for EWA  */
    { LanczosFilter,       CosineFilter   },  /* Cosine window (3 lobes)     */
    { SplineFilter,        BoxFilter      },  /* Spline Cubic Filter         */
    { LanczosRadiusFilter, LanczosFilter  },  /* Lanczos with integer radius */
    { CubicSplineFilter,   BoxFilter      },  /* CubicSpline (2/3/4 lobes)   */
  };
  /*
    Table mapping the filter/window from the above table to an actual function.
    The default support size for that filter as a weighting function, the range
    to scale with to use that function as a sinc windowing function, (typ 1.0).

    Note that the filter_type -> function is 1 to 1 except for Sinc(),
    SincFast(), and CubicBC() functions, which may have multiple filter to
    function associations.

    See "filter:verbose" handling below for the function -> filter mapping.
  */
  static struct
  {
    double
      (*function)(const double,const ResizeFilter*),
      support,  /* Default lobes/support size of the weighting filter. */
      scale,    /* Support when function used as a windowing function
                   Typically equal to the location of the first zero crossing. */
      B,C;      /* BC-spline coefficients, ignored if not a CubicBC filter. */
    ResizeWeightingFunctionType weightingFunctionType;
  } const filters[SentinelFilter] =
  {
    /*            .--- support window (if used as a Weighting Function)
                  |    .--- first crossing (if used as a Windowing Function)
                  |    |    .--- B value for Cubic Function
                  |    |    |    .---- C value for Cubic Function
                  |    |    |    |                                    */
    { Box,       0.5, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Undefined (default to Box)  */
    { Box,       0.0, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Point (special handling)    */
    { Box,       0.5, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Box                         */
    { Triangle,  1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle                    */
    { CubicBC,   1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction },  /* Hermite (cubic  B=C=0)      */
    { Hann,      1.0, 1.0, 0.0, 0.0, HannWeightingFunction },     /* Hann, cosine window         */
    { Hamming,   1.0, 1.0, 0.0, 0.0, HammingWeightingFunction },  /* Hamming, '' variation       */
    { Blackman,  1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window   */
    { Gaussian,  2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian                    */
    { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian          */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* General Cubic Filter        */
    { CubicBC,   2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction },  /* Catmull-Rom    (B=0,C=1/2)  */
    { CubicBC,   2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell   (B=C=1/3) */
    { Jinc,      3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
    { Sinc,      4.0, 1.0, 0.0, 0.0, SincWeightingFunction },     /* Raw 4-lobed Sinc            */
    { SincFast,  4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
    { Kaiser,    1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction },   /* Kaiser (square root window) */
    { Welch,     1.0, 1.0, 0.0, 0.0, WelchWeightingFunction },    /* Welch (parabolic window)    */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* Parzen (B-Spline window)    */
    { Bohman,    1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction },   /* Bohman, 2*Cosine window     */
    { Triangle,  1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window)  */
    { Lagrange,  2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc  */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened          */
    { SincFast,  2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed            */
    { SincFast,  2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened         */
    /* Robidoux: Keys cubic close to Lanczos2D sharpened */
    { CubicBC,   2.0, 1.1685777620836932,
                            0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
    /* RobidouxSharp: Sharper version of Robidoux */
    { CubicBC,   2.0, 1.105822933719019,
                            0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
    { Cosine,    1.0, 1.0, 0.0, 0.0, CosineWeightingFunction },   /* Low level cosine window     */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* Cubic B-Spline (B=1,C=0)    */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Integer Radius     */
    { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction },     /* Spline Lobes 2-lobed        */
  };
  /*
    The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
    function being used as a filter. It is used by the "filter:lobes" expert
    setting and for 'lobes' for Jinc functions in the previous table. This way
    users do not have to deal with the highly irrational lobe sizes of the Jinc
    filter.

    Values taken from
    http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    using Jv-function with v=1, then dividing by PI.
  */
  static double
    jinc_zeros[16] =
    {
      1.2196698912665045,
      2.2331305943815286,
      3.2383154841662362,
      4.2410628637960699,
      5.2427643768701817,
      6.2439216898644877,
      7.2447598687199570,
      8.2453949139520427,
      9.2458926849494673,
      10.246293348754916,
      11.246622794877883,
      12.246898461138105,
      13.247132522181061,
      14.247333735806849,
      15.247508563037300,
      16.247661874700962
   };

  /*
    Allocate resize filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) exception;
  resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter));
  (void) memset(resize_filter,0,sizeof(*resize_filter));
  /*
    Defaults for the requested filter.
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  resize_filter->blur=1.0;
  /* Promote 1D Windowed Sinc Filters to 2D Windowed Jinc filters */
  if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) &&
      (filter != SincFastFilter))
    filter_type=JincFilter;  /* 1D Windowed Sinc => 2D Windowed Jinc filters */

  /*
    Expert filter setting override.
    NOTE(review): IsStringTrue() means "filter:filter" is honoured only when
    set to a boolean-true value; other variants of this code test the artifact
    for non-NULL instead -- confirm this is the intended semantics.
  */
  artifact=GetImageArtifact(image,"filter:filter");
  if (IsStringTrue(artifact) != MagickFalse)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
      if ((UndefinedFilter < option) && (option < SentinelFilter))
        {  /* Raw filter request - no window function. */
          filter_type=(FilterType) option;
          window_type=BoxFilter;
        }
      /* Filter override with a specific window function. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            window_type=(FilterType) option;
        }
    }
  else
    {
      /* Window specified, but no filter function?  Assume Sinc/Jinc. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          ssize_t
            option;

          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            {
              filter_type= cylindrical != MagickFalse ? JincFilter
                                                     : SincFastFilter;
              window_type=(FilterType) option;
            }
        }
    }
  /* Assign the real functions to use for the filters selected. */
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].support;
  resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
  resize_filter->window=filters[window_type].function;
  resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickCoreSignature;

  /* Filter Modifications for orthogonal/cylindrical usage */
  if (cylindrical != MagickFalse)
    switch (filter_type)
    {
      case BoxFilter:
        /* Support for Cylindrical Box should be sqrt(2)/2 */
        resize_filter->support=(double) MagickSQ1_2;
        break;
      case LanczosFilter:
      case LanczosSharpFilter:
      case Lanczos2Filter:
      case Lanczos2SharpFilter:
      case LanczosRadiusFilter:
        /* Promote Lanczos (Sinc-Sinc) to its cylindrical Jinc-Jinc form. */
        resize_filter->filter=filters[JincFilter].function;
        resize_filter->window=filters[JincFilter].function;
        resize_filter->scale=filters[JincFilter].scale;
        /* number of lobes (support window size) remain unchanged */
        break;
      default:
        break;
    }
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
  switch (filter_type)
  {
    case LanczosSharpFilter:
      resize_filter->blur *= 0.9812505644269356;
      break;
    case Lanczos2SharpFilter:
      resize_filter->blur *= 0.9549963639785485;
      break;
    /* case LanczosRadius:  blur adjust is done after lobes */
    default:
      break;
  }

  /*
    Expert Option Modifications.
  */

  /* User Gaussian Sigma Override - no support change */
  if ((resize_filter->filter == Gaussian) ||
      (resize_filter->window == Gaussian) ) {
    value=0.5;    /* gaussian sigma default, half pixel */
    artifact=GetImageArtifact(image,"filter:sigma");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    /* Define coefficients for Gaussian */
    resize_filter->coefficient[0]=value;                 /* note sigma too */
    resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
    resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
       /* normalization - not actually needed or used! */
    if ( value > 0.5 )
      resize_filter->support *= 2*value;  /* increase support linearly */
  }

  /* User Kaiser Alpha Override - no support change */
  if ((resize_filter->filter == Kaiser) ||
      (resize_filter->window == Kaiser) ) {
    value=6.5; /* default beta value for Kaiser bessel windowing function */
    artifact=GetImageArtifact(image,"filter:alpha");  /* FUTURE: deprecate */
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-beta");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-alpha");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL)*MagickPI;
    /* Define coefficients for Kaiser Windowing Function */
    resize_filter->coefficient[0]=value;         /* alpha */
    resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
       /* normalization */
  }

  /* Support Overrides */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        lobes;

      lobes=(ssize_t) StringToLong(artifact);
      if (lobes < 1)
        lobes=1;
      resize_filter->support=(double) lobes;
    }
  if (resize_filter->filter == Jinc)
    {
      /*
        Convert a Jinc function lobes value to a real support value.
      */
      if (resize_filter->support > 16)
        resize_filter->support=jinc_zeros[15];  /* largest entry in table */
      else
        resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
      /*
        Blur this filter so support is an integer value (lobes dependent).
      */
      if (filter_type == LanczosRadiusFilter)
        resize_filter->blur*=floor(resize_filter->support)/
          resize_filter->support;
    }
  /*
    Expert blur override.
  */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(double) MagickEpsilon;
  /*
    Expert override of the support setting.
  */
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Scale windowing function separately to the support 'clipping' window
    that calling operator is planning to actually use. (Expert override)
  */
  resize_filter->window_support=resize_filter->support; /* default */
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Adjust window function scaling to match windowing support for weighting
    function.  This avoids a division on every filter call.
  */
  resize_filter->scale*=PerceptibleReciprocal(resize_filter->window_support);
  /*
    Set Cubic Spline B,C values, calculate Cubic coefficients.
  */
  B=0.0;
  C=0.0;
  if ((resize_filter->filter == CubicBC) ||
      (resize_filter->window == CubicBC) )
    {
      B=filters[filter_type].B;
      C=filters[filter_type].C;
      if (filters[window_type].function == CubicBC)
        {
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=StringToDouble(artifact,(char **) NULL);
          C=(1.0-B)/2.0;  /* Calculate C to get a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c"); /* user C override */
          if (artifact != (const char *) NULL)
            C=StringToDouble(artifact,(char **) NULL);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=StringToDouble(artifact,(char **) NULL);
              B=1.0-2.0*C;  /* Calculate B to get a Keys cubic filter. */
            }
        }
      {
        const double
          twoB = B+B;

        /*
          Convert B,C values into Cubic Coefficients. See CubicBC().
        */
        resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
        resize_filter->coefficient[1]=-3.0+twoB+C;
        resize_filter->coefficient[2]=2.0-1.5*B-C;
        resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
        resize_filter->coefficient[4]=-8.0*C-twoB;
        resize_filter->coefficient[5]=B+5.0*C;
        resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
      }
    }

  /*
    Expert Option Request for verbose details of the resulting filter.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp master
  {
#endif
    if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
      {
        double
          support,
          x;

        /*
          Set the weighting function properly when the weighting function
          may not exactly match the filter of the same name.  EG: a Point
          filter really uses a Box weighting function with a different
          support than is typically used.
        */
        if (resize_filter->filter == Box)       filter_type=BoxFilter;
        if (resize_filter->filter == Sinc)      filter_type=SincFilter;
        if (resize_filter->filter == SincFast)  filter_type=SincFastFilter;
        if (resize_filter->filter == Jinc)      filter_type=JincFilter;
        if (resize_filter->filter == CubicBC)   filter_type=CubicFilter;
        if (resize_filter->window == Box)       window_type=BoxFilter;
        if (resize_filter->window == Sinc)      window_type=SincFilter;
        if (resize_filter->window == SincFast)  window_type=SincFastFilter;
        if (resize_filter->window == Jinc)      window_type=JincFilter;
        if (resize_filter->window == CubicBC)   window_type=CubicFilter;
        /*
          Report Filter Details.
        */
        support=GetResizeFilterSupport(resize_filter);  /* practical_support */
        (void) FormatLocaleFile(stdout,
          "# Resampling Filter (for graphing)\n#\n");
        (void) FormatLocaleFile(stdout,"# filter = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,filter_type));
        (void) FormatLocaleFile(stdout,"# window = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,window_type));
        (void) FormatLocaleFile(stdout,"# support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->support);
        (void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->window_support);
        (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->blur);
        if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
          (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
            GetMagickPrecision(),(double) resize_filter->coefficient[0]);
        if ( filter_type == KaiserFilter || window_type == KaiserFilter )
          (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
            GetMagickPrecision(),(double) resize_filter->coefficient[0]);
        (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
          GetMagickPrecision(), (double) support);
        if ((filter_type == CubicFilter) || (window_type == CubicFilter))
          (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
            GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C);
        (void) FormatLocaleFile(stdout,"\n");
        /*
          Output values of resulting filter graph -- for graphing filter result.
        */
        for (x=0.0; x <= support; x+=0.01f)
          (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
            GetMagickPrecision(),(double)
            GetResizeFilterWeight(resize_filter,x));
        /*
          A final value so gnuplot can graph the 'stop' properly.
        */
        (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
          GetMagickPrecision(),0.0);
      }
    /* Output the above once only for each image - remove setting */
    (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% This is shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing on images a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Fast interpolative resize via mesh interpolation; best suited to size
    changes of less than roughly +/- 50% of the original image size.
  */
  return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 1. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*J1(x), where J1(x) is a rational approximation of j1(x)/x;
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% double BesselOrderOne(double x)
%
% A description of each parameter follows:
%
% o x: double value.
%
*/
#undef I0
static double I0(double x)
{
  /*
    Modified Bessel function of the first kind, order zero: sum the power
    series sum_k (x*x/4)^k/(k!)^2 until the next term drops below
    MagickEpsilon.
  */
  double
    quarter_x2,
    series,
    term;
  ssize_t
    k;
  quarter_x2=x*x/4.0;
  series=1.0;
  term=quarter_x2;
  for (k=2; term > MagickEpsilon; k++)
  {
    series+=term;
    term*=quarter_x2/((double) k*k);
  }
  return(series);
}
#undef J1
static double J1(double x)
{
  /*
    Rational minimax approximation used for j1(x) on (0,8]: evaluates a
    degree-8 polynomial ratio in x*x by Horner's rule.  BesselOrderOne()
    multiplies the result by x.
  */
  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };
  double
    denominator,
    numerator;
  ssize_t
    k;
  numerator=Pone[8];
  denominator=Qone[8];
  for (k=7; k >= 0; k--)
  {
    numerator=numerator*x*x+Pone[k];
    denominator=denominator*x*x+Qone[k];
  }
  return(numerator/denominator);
}
#undef P1
static double P1(double x)
{
  /*
    Asymptotic helper for j1: rational approximation in (8/x)^2, intended
    for x in (8,inf).  Evaluated by Horner's rule.
  */
  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };
  double
    denominator,
    numerator;
  ssize_t
    k;
  numerator=Pone[5];
  denominator=Qone[5];
  for (k=4; k >= 0; k--)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
  }
  return(numerator/denominator);
}
#undef Q1
static double Q1(double x)
{
  /*
    Asymptotic helper for j1: rational approximation in (8/x)^2, intended
    for x in (8,inf).  Evaluated by Horner's rule.
  */
  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };
  double
    denominator,
    numerator;
  ssize_t
    k;
  numerator=Pone[5];
  denominator=Qone[5];
  for (k=4; k >= 0; k--)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
  }
  return(numerator/denominator);
}
static double BesselOrderOne(double x)
{
  /*
    Bessel function of the first kind, order one.  j1 is antisymmetric
    (j1(-x) == -j1(x)), so evaluate at |x| and restore the sign last: the
    series form x*J1(x) on (0,8), the asymptotic P1/Q1 expansion beyond.
  */
  double
    original,
    value;
  if (x == 0.0)
    return(0.0);
  original=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(original*J1(x));
  value=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin(x)-
    cos(x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin(x)+cos(x))));
  if (original < 0.0)
    value=(-value);
  return(value);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroys the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Release the resize filter and return NULL.  The signature is
    invalidated first so any stale pointer trips this module's signature
    asserts instead of silently using freed memory.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  resize_filter->signature=(~MagickCoreSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  /*
    Accessor: pointer to the filter's coefficient array.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  /*
    Accessor: the filter's blur (sharpen/blur scaling) factor.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  /*
    Accessor: the windowing-function scale factor.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  /*
    Accessor: support (half-width) of the windowing function.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  /*
    Accessor: which weighting function the filter uses.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  /*
    Accessor: which weighting function the filter's window uses.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  /*
    Effective support window: the nominal support enlarged by the blur
    factor (see the "filter:blur" artifact).
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usually lies between zero and the filter's current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% double GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const double x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  /*
    Evaluate the filter kernel at x: the windowing function's value scales
    the weighting filter's value.  x is first reduced to |x| and divided
    by the blur factor.
  */
  double
    blurred_x,
    window_weight;
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  blurred_x=fabs((double) x)*PerceptibleReciprocal(resize_filter->blur);
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    window_weight=1.0;  /* Point or Box filter -- avoid division by zero */
  else
    window_weight=resize_filter->window(blurred_x*resize_filter->scale,
      resize_filter);
  return(window_weight*resize_filter->filter(blurred_x,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"
  CacheView
    *image_view,
    *resize_view;
  Image
    *resize_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PointInfo
    scale;  /* source pixels per destination pixel, per axis */
  ssize_t
    y;
  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /*
    Identity resize is a plain clone.
  */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    /*
      Map destination pixel centres back to source coordinates (the +0.5
      and -0.5 align pixel centres, not corners).
    */
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;
        PixelTrait
          resize_traits,
          traits;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        /*
          NOTE(review): InterpolatePixelChannels() is invoked once per
          channel index i even though its arguments do not depend on i --
          looks redundant (same q written each time); confirm before
          hoisting out of this loop.
        */
        offset.x=((double) x+0.5)*scale.x-0.5;
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
        if (status == MagickFalse)
          break;
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* progress is shared across OpenMP threads; increment atomically */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  /* on any failure the partially built image is discarded */
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,const size_t columns,
% const size_t rows,const double delta_x,const double rigidity,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"
  CacheView
    *image_view,
    *rescale_view;
  gfloat
    *packet,   /* one carved pixel returned by lqr_carver_scan_ext() */
    *pixels;   /* flattened float copy of the source image */
  Image
    *rescale_image;
  int
    x_offset,
    y_offset;
  LqrCarver
    *carver;
  LqrRetVal
    lqr_status;
  MagickBooleanType
    status;
  MemoryInfo
    *pixel_info;
  gfloat
    *q;
  ssize_t
    y;
  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Tiny targets fall back to a conventional filtered resize.
  */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /*
    Flatten the source into a 32-bit float buffer for liblqr; allocation
    uses MaxPixelChannels per pixel as an upper bound on the actual
    channel count.
  */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;
      /* normalize each channel to [0,1] floats */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  /*
    NOTE(review): the lqr_carver_init() return value is overwritten by the
    lqr_carver_resize() result, and both are then discarded -- confirm
    errors from liblqr are deliberately ignored here.
  */
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  /*
    The carver decides the final geometry; clone at that size.
  */
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Scan the carved result pixel by pixel back into the new image.
  */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    Quantum
      *magick_restrict p;
    ssize_t
      i;
    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;
      PixelTrait
        rescale_traits,
        traits;
      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      /* scale [0,1] floats back up to the quantum range */
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  /*
    Stub compiled when the Liquid Rescale (liblqr) delegate is not built
    in: raise MissingDelegateError and return no image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void CopyPixels(const Quantum *source,const ssize_t source_offset,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  /*
    Copy one whole pixel (all channels) from the source slot to the
    destination slot.  Offsets are in pixels, not quanta.
  */
  const Quantum
    *p;
  Quantum
    *q;
  ssize_t
    k;
  p=source+source_offset*(ssize_t) channels;
  q=destination+destination_offset*(ssize_t) channels;
  for (k=0; k < (ssize_t) channels; k++)
    q[k]=p[k];
}
static inline void MixPixels(const Quantum *source,const ssize_t *source_offset,
  const size_t source_size,Quantum *destination,
  const ssize_t destination_offset,const size_t channels)
{
  /*
    Write, channel by channel, the integer mean of source_size source
    pixels (given by their pixel offsets) into the destination slot.
  */
  ssize_t
    channel;
  for (channel=0; channel < (ssize_t) channels; channel++)
  {
    ssize_t
      pixel,
      sum;
    sum=0;
    for (pixel=0; pixel < (ssize_t) source_size; pixel++)
      sum+=source[source_offset[pixel]*channels+channel];
    destination[channels*destination_offset+channel]=(Quantum)
      (sum/source_size);
  }
}
static inline void Mix2Pixels(const Quantum *source,
  const ssize_t source_offset1,const ssize_t source_offset2,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  /*
    Average exactly two source pixels into the destination slot.
  */
  const ssize_t
    pair[2] = { source_offset1, source_offset2 };
  MixPixels(source,pair,2,destination,destination_offset,channels);
}
static inline int PixelsEqual(const Quantum *source1,ssize_t offset1,
  const Quantum *source2,ssize_t offset2,const size_t channels)
{
  /*
    Return 1 when the two pixels match on every channel, 0 otherwise.
  */
  const Quantum
    *p,
    *q;
  ssize_t
    k;
  p=source1+offset1*(ssize_t) channels;
  q=source2+offset2*(ssize_t) channels;
  for (k=0; k < (ssize_t) channels; k++)
    if (p[k] != q[k])
      return(0);
  return(1);
}
static inline void Eagle2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /*
    Eagle 2x pixel-art scaler on a 3x3 input window (centre = pixel 4):
    each output corner takes the input's diagonal corner when that corner
    and both adjacent edge neighbours agree; otherwise it keeps the
    centre pixel.
  */
  int
    bottom_left,
    bottom_right,
    top_left,
    top_right;
  (void) source;
  top_left=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,1,pixels,3,channels);
  top_right=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  bottom_left=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  bottom_right=PixelsEqual(pixels,5,pixels,8,channels) &&
    PixelsEqual(pixels,8,pixels,7,channels);
  CopyPixels(pixels,(ssize_t) (top_left ? 0 : 4),result,0,channels);
  CopyPixels(pixels,(ssize_t) (top_right ? 2 : 4),result,1,channels);
  CopyPixels(pixels,(ssize_t) (bottom_left ? 6 : 4),result,2,channels);
  CopyPixels(pixels,(ssize_t) (bottom_right ? 8 : 4),result,3,channels);
}
static void Hq2XHelper(const unsigned int rule,const Quantum *source,
  Quantum *destination,const ssize_t destination_offset,const size_t channels,
  const ssize_t e,const ssize_t a,const ssize_t b,const ssize_t d,
  const ssize_t f,const ssize_t h)
{
  /*
    Emit one output pixel of the hq2x scaler.  'rule' (0-19, looked up in
    Hq2XTable) selects how the centre pixel e blends with its neighbour
    offsets a, b, d, f, h; rules 12 and up first test whether two
    neighbours match before choosing between two blends.  caseA averages
    4 offsets, caseB averages 8 (repeating e to weight it more heavily).
  */
#define caseA(N,A,B,C,D) \
  case N: \
  { \
    const ssize_t \
      offsets[4] = { A, B, C, D }; \
\
    MixPixels(source,offsets,4,destination,destination_offset,channels);\
    break; \
  }
#define caseB(N,A,B,C,D,E,F,G,H) \
  case N: \
  { \
    const ssize_t \
      offsets[8] = { A, B, C, D, E, F, G, H }; \
\
    MixPixels(source,offsets,8,destination,destination_offset,channels);\
    break; \
  }
  switch (rule)
  {
    /* rule 0: plain copy of the centre pixel */
    case 0:
    {
      CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    caseA(1,e,e,e,a)
    caseA(2,e,e,e,d)
    caseA(3,e,e,e,b)
    caseA(4,e,e,d,b)
    caseA(5,e,e,a,b)
    caseA(6,e,e,a,d)
    caseB(7,e,e,e,e,e,b,b,d)
    caseB(8,e,e,e,e,e,d,d,b)
    caseB(9,e,e,e,e,e,e,d,b)
    caseB(10,e,e,d,d,d,b,b,b)
    /* rule 11: 14 parts e, 1 part d, 1 part b */
    case 11:
    {
      const ssize_t
        offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };
      MixPixels(source,offsets,16,destination,destination_offset,channels);
      break;
    }
    /* rules 12-19: conditional blends, keyed on a neighbour-pair match */
    case 12:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };
          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 13:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };
          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 14:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };
          MixPixels(source,offsets,16,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 15:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };
          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };
          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 16:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, e, d, b };
          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };
          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 17:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };
          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };
          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 18:
    {
      if (PixelsEqual(source,b,source,f,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, b, b, d };
          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, d };
          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    /* rule 19 (and any other value) */
    default:
    {
      if (PixelsEqual(source,d,source,h,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, d, d, b };
          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, b };
          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
  }
#undef caseA
#undef caseB
}
static inline unsigned int Hq2XPatternToNumber(const int *pattern)
{
  /*
    Pack the 8 neighbour-difference flags into one integer, pattern[0]
    becoming the most-significant bit (Horner accumulation base 2).
  */
  ssize_t
    k;
  unsigned int
    number;
  number=0;
  for (k=0; k < 8; k++)
    number=(number << 1)+(unsigned int) pattern[k];
  return(number);
}
static inline void Hq2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /*
    hq2x scaler: classify the 3x3 window (centre = pixel 4) by which of
    the 8 neighbours differ from the centre, then emit each of the four
    output pixels via the blend rule looked up in Hq2XTable.
  */
  static const unsigned int
    Hq2XTable[] =
    {
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 15, 12, 5, 3, 17, 13,
      4, 4, 6, 18, 4, 4, 6, 18, 5, 3, 12, 12, 5, 3, 1, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 17, 13, 5, 3, 16, 14,
      4, 4, 6, 18, 4, 4, 6, 18, 5, 3, 16, 12, 5, 3, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 19, 12, 12, 5, 19, 16, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 16, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 19, 1, 12, 5, 19, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 18, 5, 3, 16, 12, 5, 19, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 15, 12, 5, 3, 17, 13,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 16, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 17, 13, 5, 3, 16, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 13, 5, 3, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 16, 13,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 1, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 1, 12, 5, 3, 1, 14
    };
  /*
    pattern1 bit i is 1 when neighbour (7-i) differs from the centre;
    pattern2..4 are the same mask rotated 90 degrees each time, so one
    table serves all four output corners.
  */
  const int
    pattern1[] =
    {
      !PixelsEqual(pixels,4,pixels,8,channels),
      !PixelsEqual(pixels,4,pixels,7,channels),
      !PixelsEqual(pixels,4,pixels,6,channels),
      !PixelsEqual(pixels,4,pixels,5,channels),
      !PixelsEqual(pixels,4,pixels,3,channels),
      !PixelsEqual(pixels,4,pixels,2,channels),
      !PixelsEqual(pixels,4,pixels,1,channels),
      !PixelsEqual(pixels,4,pixels,0,channels)
    };
#define Rotated(p) p[2], p[4], p[7], p[1], p[6], p[0], p[3], p[5]
  const int pattern2[] = { Rotated(pattern1) };
  const int pattern3[] = { Rotated(pattern2) };
  const int pattern4[] = { Rotated(pattern3) };
#undef Rotated
  (void) source;
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern1)],pixels,result,0,
    channels,4,0,1,3,5,7);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern2)],pixels,result,1,
    channels,4,2,5,1,7,3);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern3)],pixels,result,3,
    channels,4,8,7,5,3,1);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern4)],pixels,result,2,
    channels,4,6,3,7,1,5);
}
static void Fish2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  /*
    "Fish" 2x scaler.  Only the bottom-right output (result slot 3) needs
    real work: the other three outputs are copies chosen by comparing
    pixel intensities of the top-left 2x2 quad (window offsets 0,1,3,4).
    Corner() and Line() blend three or two pixels for slot 3 depending
    on which diagonal/edge equalities hold.
  */
#define Corner(A,B,C,D) \
{ \
  if (intensities[B] > intensities[A]) \
    { \
      const ssize_t \
        offsets[3] = { B, C, D }; \
\
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
  else \
    { \
      const ssize_t \
        offsets[3] = { A, B, C }; \
\
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
}
#define Line(A,B,C,D) \
{ \
  if (intensities[C] > intensities[A]) \
    Mix2Pixels(pixels,C,D,result,3,channels); \
  else \
    Mix2Pixels(pixels,A,B,result,3,channels); \
}
  const ssize_t
    pixels_offsets[4] = { 0, 1, 3, 4 };
  MagickFloatType
    intensities[9];
  /* pairwise equality flags among window offsets 0 (a), 1 (b), 3 (d), 4 (e) */
  int
    ae,
    bd,
    ab,
    ad,
    be,
    de;
  ssize_t
    i;
  for (i=0; i < 9; i++)
    intensities[i]=GetPixelIntensity(source,pixels + i*channels);
  CopyPixels(pixels,0,result,0,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[1] ? 0 : 1),result,
    1,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[3] ? 0 : 3),result,
    2,channels);
  ae=PixelsEqual(pixels,0,pixels,4,channels);
  bd=PixelsEqual(pixels,1,pixels,3,channels);
  ab=PixelsEqual(pixels,0,pixels,1,channels);
  de=PixelsEqual(pixels,3,pixels,4,channels);
  ad=PixelsEqual(pixels,0,pixels,3,channels);
  be=PixelsEqual(pixels,1,pixels,4,channels);
  /* solid quad: keep the corner pixel */
  if (ae && bd && ab)
    {
      CopyPixels(pixels,0,result,3,channels);
      return;
    }
  /* L-shaped matches: blend around the odd pixel out */
  if (ad && de && !ab)
    {
      Corner(1,0,4,3)
      return;
    }
  if (be && de && !ab)
    {
      Corner(0,1,3,4)
      return;
    }
  if (ad && ab && !be)
    {
      Corner(4,3,1,0)
      return;
    }
  if (ab && be && !ad)
    {
      Corner(3,0,4,1)
      return;
    }
  /* diagonal matches: average the matching diagonal */
  if (ae && (!bd || intensities[1] > intensities[0]))
    {
      Mix2Pixels(pixels,0,4,result,3,channels);
      return;
    }
  if (bd && (!ae || intensities[0] > intensities[1]))
    {
      Mix2Pixels(pixels,1,3,result,3,channels);
      return;
    }
  /* single-edge matches */
  if (ab)
    {
      Line(0,1,3,4)
      return;
    }
  if (de)
    {
      Line(3,4,0,1)
      return;
    }
  if (ad)
    {
      Line(0,3,1,4)
      return;
    }
  if (be)
    {
      Line(1,4,0,3)
      return;
    }
  /* no structure detected: plain average of the quad */
  MixPixels(pixels,pixels_offsets,4,result,3,channels);
#undef Corner
#undef Line
}
static void Xbr2X(const Image *magick_unused(source),const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /*
    xBR 2x scaler on a 5x5 window (centre = pixel 12).  Each w_M_N flag is
    0 when window pixels M and N are equal and 1 otherwise; the weighted
    sums below detect an edge crossing each corner of the centre pixel
    and, if found, blend the centre with its nearer matching neighbour.
  */
#define WeightVar(M,N) const int w_##M##_##N = \
  PixelsEqual(pixels,M,pixels,N,channels) ? 0 : 1;
  WeightVar(12,11)
  WeightVar(12,7)
  WeightVar(12,13)
  WeightVar(12,17)
  WeightVar(12,16)
  WeightVar(12,8)
  WeightVar(6,10)
  WeightVar(6,2)
  WeightVar(11,7)
  WeightVar(11,17)
  WeightVar(11,5)
  WeightVar(7,13)
  WeightVar(7,1)
  WeightVar(12,6)
  WeightVar(12,18)
  WeightVar(8,14)
  WeightVar(8,2)
  WeightVar(13,17)
  WeightVar(13,9)
  WeightVar(7,3)
  WeightVar(16,10)
  WeightVar(16,22)
  WeightVar(17,21)
  WeightVar(11,15)
  WeightVar(18,14)
  WeightVar(18,22)
  WeightVar(17,23)
  WeightVar(17,19)
#undef WeightVar
  magick_unreferenced(source);
  /* top-left output */
  if (
    w_12_16 + w_12_8 + w_6_10 + w_6_2 + (4 * w_11_7) <
    w_11_17 + w_11_5 + w_7_13 + w_7_1 + (4 * w_12_6)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_7 ? 11 : 7),12,result,0,
      channels);
  else
    CopyPixels(pixels,12,result,0,channels);
  /* top-right output */
  if (
    w_12_18 + w_12_6 + w_8_14 + w_8_2 + (4 * w_7_13) <
    w_13_17 + w_13_9 + w_11_7 + w_7_3 + (4 * w_12_8)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_7 <= w_12_13 ? 7 : 13),12,result,1,
      channels);
  else
    CopyPixels(pixels,12,result,1,channels);
  /* bottom-left output */
  if (
    w_12_6 + w_12_18 + w_16_10 + w_16_22 + (4 * w_11_17) <
    w_11_7 + w_11_15 + w_13_17 + w_17_21 + (4 * w_12_16)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_17 ? 11 : 17),12,result,2,
      channels);
  else
    CopyPixels(pixels,12,result,2,channels);
  /* bottom-right output */
  if (
    w_12_8 + w_12_16 + w_18_14 + w_18_22 + (4 * w_13_17) <
    w_11_17 + w_17_23 + w_17_19 + w_7_13 + (4 * w_12_18)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_13 <= w_12_17 ? 13 : 17),12,result,3,
      channels);
  else
    CopyPixels(pixels,12,result,3,channels);
}
static void Scale2X(const Image *magick_unused(source),const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /*
    Scale2X (AdvMAME2x) on a 3x3 window (centre = pixel 4): when opposite
    edge neighbours match, the centre fills the whole 2x2 output block;
    otherwise each output corner takes the adjacent edge neighbour that
    matches the top/bottom neighbour, falling back to the centre.
  */
  magick_unreferenced(source);
  if (PixelsEqual(pixels,1,pixels,7,channels) ||
      PixelsEqual(pixels,3,pixels,5,channels))
    {
      ssize_t
        corner;
      for (corner=0; corner < 4; corner++)
        CopyPixels(pixels,4,result,corner,channels);
      return;
    }
  CopyPixels(pixels,(ssize_t) (PixelsEqual(pixels,1,pixels,3,channels) ? 3 : 4),
    result,0,channels);
  CopyPixels(pixels,(ssize_t) (PixelsEqual(pixels,1,pixels,5,channels) ? 5 : 4),
    result,1,channels);
  CopyPixels(pixels,(ssize_t) (PixelsEqual(pixels,3,pixels,7,channels) ? 3 : 4),
    result,2,channels);
  CopyPixels(pixels,(ssize_t) (PixelsEqual(pixels,5,pixels,7,channels) ? 5 : 4),
    result,3,channels);
}
static void Epbx2X(const Image *magick_unused(source),const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /*
    EPX-B 2x scaler on a 3x3 window (centre = pixel 4).  Start with the
    centre pixel in all four outputs; when the window shows a genuine
    diagonal edge (the guard expression below), soften each corner by
    averaging its two adjacent edge neighbours if HelperCond holds.
  */
#define HelperCond(a,b,c,d,e,f,g) ( \
  PixelsEqual(pixels,a,pixels,b,channels) && ( \
    PixelsEqual(pixels,c,pixels,d,channels) || \
    PixelsEqual(pixels,c,pixels,e,channels) || \
    PixelsEqual(pixels,a,pixels,f,channels) || \
    PixelsEqual(pixels,b,pixels,g,channels) \
    ) \
  )
  ssize_t
    i;
  magick_unreferenced(source);
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  /* guard: no opposite-edge match, and the centre relates to the window */
  if (
    !PixelsEqual(pixels,3,pixels,5,channels) &&
    !PixelsEqual(pixels,1,pixels,7,channels) &&
    (
      PixelsEqual(pixels,4,pixels,3,channels) ||
      PixelsEqual(pixels,4,pixels,7,channels) ||
      PixelsEqual(pixels,4,pixels,5,channels) ||
      PixelsEqual(pixels,4,pixels,1,channels) ||
      (
        (
          !PixelsEqual(pixels,0,pixels,8,channels) ||
          PixelsEqual(pixels,4,pixels,6,channels) ||
          PixelsEqual(pixels,3,pixels,2,channels)
        ) &&
        (
          !PixelsEqual(pixels,6,pixels,2,channels) ||
          PixelsEqual(pixels,4,pixels,0,channels) ||
          PixelsEqual(pixels,4,pixels,8,channels)
        )
      )
    )
  )
    {
      if (HelperCond(1,3,4,0,8,2,6))
        Mix2Pixels(pixels,1,3,result,0,channels);
      if (HelperCond(5,1,4,2,6,8,0))
        Mix2Pixels(pixels,5,1,result,1,channels);
      if (HelperCond(3,7,4,6,2,0,8))
        Mix2Pixels(pixels,3,7,result,2,channels);
      if (HelperCond(7,5,4,8,0,6,2))
        Mix2Pixels(pixels,7,5,result,3,channels);
    }
#undef HelperCond
}
static inline void Eagle3X(const Image *magick_unused(source),
  const Quantum *pixels,Quantum *result,const size_t channels)
{
  /*
    Eagle 3x scaler on a 3x3 window (centre = pixel 4): each output
    corner takes its matching input corner when the corner's two
    neighbours agree with it; edge outputs blend the two adjoining
    corners when both fire; the centre output is always the centre pixel.
  */
  ssize_t
    bl,
    br,
    tl,
    tr;
  magick_unreferenced(source);
  tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  CopyPixels(pixels,(ssize_t) (tl ? 0 : 4),result,0,channels);
  if (tl && tr)
    Mix2Pixels(pixels,0,2,result,1,channels);
  else
    CopyPixels(pixels,4,result,1,channels);
  CopyPixels(pixels,(ssize_t) (tr ? 1 : 4),result,2,channels);
  if (tl && bl)
    Mix2Pixels(pixels,0,6,result,3,channels);
  else
    CopyPixels(pixels,4,result,3,channels);
  CopyPixels(pixels,4,result,4,channels);
  if (tr && br)
    Mix2Pixels(pixels,2,8,result,5,channels);
  else
    CopyPixels(pixels,4,result,5,channels);
  CopyPixels(pixels,(ssize_t) (bl ? 3 : 4),result,6,channels);
  if (bl && br)
    Mix2Pixels(pixels,6,8,result,7,channels);
  else
    CopyPixels(pixels,4,result,7,channels);
  CopyPixels(pixels,(ssize_t) (br ? 5 : 4),result,8,channels);
}
static inline void Eagle3XB(const Image *magick_unused(source),
  const Quantum *pixels,Quantum *result,const size_t channels)
{
  ssize_t
    i,
    match_bl,
    match_br,
    match_tl,
    match_tr;

  magick_unreferenced(source);
  /*
    Eagle 3xB: like Eagle3X, but only the four result corners may differ
    from the centre pixel; edges and centre always replicate the centre.
  */
  for (i=0; i < 9; i++)
    CopyPixels(pixels,4,result,i,channels);
  match_tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  match_tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  match_bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  match_br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  if (match_tl)
    CopyPixels(pixels,0,result,0,channels);
  if (match_tr)
    CopyPixels(pixels,1,result,2,channels);
  if (match_bl)
    CopyPixels(pixels,3,result,6,channels);
  if (match_br)
    CopyPixels(pixels,5,result,8,channels);
}
static inline void Scale3X(const Image *magick_unused(source),
  const Quantum *pixels,Quantum *result,const size_t channels)
{
  /*
    Scale3X magnification: "pixels" is the 3x3 source neighbourhood
    (indices 0..8, centre at 4) and "result" receives the 3x3 magnified
    block (indices 0..8), each pixel "channels" components wide.
  */
  magick_unreferenced(source);
  /*
    Interpolate only when the vertical (1,7) and horizontal (3,5)
    neighbour pairs both differ; otherwise replicate the centre pixel.
  */
  if (!PixelsEqual(pixels,1,pixels,7,channels) &&
      !PixelsEqual(pixels,3,pixels,5,channels))
    {
      /* Top-left corner. */
      if (PixelsEqual(pixels,3,pixels,1,channels))
        CopyPixels(pixels,3,result,0,channels);
      else
        CopyPixels(pixels,4,result,0,channels);
      /* Top edge. */
      if (
        (
          PixelsEqual(pixels,3,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,2,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,0,channels)
        )
      )
        CopyPixels(pixels,1,result,1,channels);
      else
        CopyPixels(pixels,4,result,1,channels);
      /* Top-right corner. */
      if (PixelsEqual(pixels,5,pixels,1,channels))
        CopyPixels(pixels,5,result,2,channels);
      else
        CopyPixels(pixels,4,result,2,channels);
      /* Left edge. */
      if (
        (
          PixelsEqual(pixels,3,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,6,channels)
        ) ||
        (
          PixelsEqual(pixels,3,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,0,channels)
        )
      )
        CopyPixels(pixels,3,result,3,channels);
      else
        CopyPixels(pixels,4,result,3,channels);
      /* Centre always keeps the source centre pixel. */
      CopyPixels(pixels,4,result,4,channels);
      /* Right edge. */
      if (
        (
          PixelsEqual(pixels,5,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,8,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,2,channels)
        )
      )
        CopyPixels(pixels,5,result,5,channels);
      else
        CopyPixels(pixels,4,result,5,channels);
      /* Bottom-left corner. */
      if (PixelsEqual(pixels,3,pixels,7,channels))
        CopyPixels(pixels,3,result,6,channels);
      else
        CopyPixels(pixels,4,result,6,channels);
      /* Bottom edge. */
      if (
        (
          PixelsEqual(pixels,3,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,8,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,6,channels)
        )
      )
        CopyPixels(pixels,7,result,7,channels);
      else
        CopyPixels(pixels,4,result,7,channels);
      /* Bottom-right corner. */
      if (PixelsEqual(pixels,5,pixels,7,channels))
        CopyPixels(pixels,5,result,8,channels);
      else
        CopyPixels(pixels,4,result,8,channels);
    }
  else
    {
      ssize_t
        i;

      /* Degenerate neighbourhood: plain 3x3 pixel replication. */
      for (i=0; i < 9; i++)
        CopyPixels(pixels,4,result,i,channels);
    }
}
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  const char
    *option;

  Image
    *source_image,
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    rectangle;

  ssize_t
    y;

  unsigned char
    magnification,
    width;

  void
    (*scaling_method)(const Image *,const Quantum *,Quantum *,size_t);

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Select the pixel-art scaling kernel from the "magnify:method" image
    option.  Each recognized method sets the per-pixel kernel, its
    magnification factor, and the square source window width it reads.
  */
  option=GetImageOption(image->image_info,"magnify:method");
  if (option == (char *) NULL)
    option="scale2x";
  scaling_method=Scale2X;
  magnification=1;
  width=1;
  switch (*option)
  {
    case 'e':
    {
      if (LocaleCompare(option,"eagle2x") == 0)
        {
          scaling_method=Eagle2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3x") == 0)
        {
          scaling_method=Eagle3X;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3xb") == 0)
        {
          scaling_method=Eagle3XB;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"epbx2x") == 0)
        {
          scaling_method=Epbx2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'f':
    {
      if (LocaleCompare(option,"fish2x") == 0)
        {
          scaling_method=Fish2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'h':
    {
      if (LocaleCompare(option,"hq2x") == 0)
        {
          scaling_method=Hq2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 's':
    {
      if (LocaleCompare(option,"scale2x") == 0)
        {
          scaling_method=Scale2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"scale3x") == 0)
        {
          scaling_method=Scale3X;
          magnification=3;
          width=3;
          break;
        }
      break;
    }
    case 'x':
    {
      if (LocaleCompare(option,"xbr2x") == 0)
        {
          scaling_method=Xbr2X;
          magnification=2;
          width=5;
        }
      break;
    }
    default:
      break;
  }
  /*
    NOTE(review): an unrecognized option string leaves magnification=1 and
    width=1 while scaling_method remains Scale2X, whose kernel reads a 3x3
    window -- confirm callers cannot reach this path with arbitrary strings.
  */
  /*
    Make a working copy of the source image and convert it to RGB colorspace.
  */
  source_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (source_image == (Image *) NULL)
    return((Image *) NULL);
  offset.x=0;
  offset.y=0;
  rectangle.x=0;
  rectangle.y=0;
  rectangle.width=image->columns;
  rectangle.height=image->rows;
  (void) CopyImagePixels(source_image,image,&rectangle,&offset,exception);
  (void) SetImageColorspace(source_image,RGBColorspace,exception);
  magnify_image=CloneImage(source_image,magnification*source_image->columns,
    magnification*source_image->rows,MagickTrue,exception);
  if (magnify_image == (Image *) NULL)
    {
      source_image=DestroyImage(source_image);
      return((Image *) NULL);
    }
  /*
    Magnify the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(source_image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,magnify_image,source_image->rows,1)
#endif
  for (y=0; y < (ssize_t) source_image->rows; y++)
  {
    /*
      NOTE(review): assumes magnification*magnification*channels <= 128;
      with magnification 3 this bounds channels at 14 -- verify against
      MaxPixelChannels.
    */
    Quantum
      r[128]; /* to hold result pixels */

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Queue "magnification" destination rows for this source row. */
    q=QueueCacheViewAuthenticPixels(magnify_view,0,magnification*y,
      magnify_image->columns,magnification,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) source_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      size_t
        channels;

      ssize_t
        i;

      ssize_t
        j;

      /* Fetch the width x width source window centred on (x,y); the
         virtual cache view supplies edge pixels outside the image. */
      p=GetCacheViewVirtualPixels(image_view,x-width/2,y-width/2,width,width,
        exception);
      channels=GetPixelChannels(source_image);
      scaling_method(source_image,p,r,channels);
      /*
        Copy the result pixels into the final image.
      */
      for (j=0; j < (ssize_t) magnification; j++)
        for (i=0; i < (ssize_t) (channels*magnification); i++)
          q[j*channels*magnify_image->columns+i]=r[j*magnification*channels+i];
      q+=magnification*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MagnifyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  source_image=DestroyImage(source_image);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: proportionally scale the image to half its
    dimensions with the spline filter.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  return(ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resize image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterType filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"

  double
    original_x_resolution,
    original_y_resolution;

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Compute the pixel geometry that keeps the image the same real-world
    size at the new resolution, then resize with the requested filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  original_x_resolution=image->resolution.x == 0.0 ? DefaultResolution :
    image->resolution.x;
  original_y_resolution=image->resolution.y == 0.0 ? DefaultResolution :
    image->resolution.y;
  width=(size_t) (x_resolution*image->columns/original_x_resolution+0.5);
  height=(size_t) (y_resolution*image->rows/original_y_resolution+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image == (Image *) NULL)
    return((Image *) NULL);
  resample_image->resolution.x=x_resolution;
  resample_image->resolution.y=y_resolution;
  return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, a image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to a Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
% const FilterType filter,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One entry of a filter's contribution window: the source pixel index and
  the filter weight applied to it.
*/
typedef struct _ContributionInfo
{
  double
    weight;  /* filter weight applied to this contributing source pixel */

  ssize_t
    pixel;   /* source pixel index (column or row) within the window */
} ContributionInfo;
static ContributionInfo **DestroyContributionTLS(
  ContributionInfo **contribution)
{
  ssize_t
    n;

  /*
    Release each per-thread contribution buffer, then the pointer table
    itself; always returns NULL for pointer hygiene at the call site.
  */
  assert(contribution != (ContributionInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (contribution[n] != (ContributionInfo *) NULL)
      contribution[n]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[n]);
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
static ContributionInfo **AcquireContributionTLS(const size_t count)
{
  ContributionInfo
    **contribution;

  size_t
    number_threads;

  ssize_t
    n;

  /*
    Allocate one aligned contribution buffer of "count" entries per worker
    thread; on any failure everything already acquired is released and
    NULL is returned.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    contribution[n]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[n] == (ContributionInfo *) NULL)
      return(DestroyContributionTLS(contribution));
  }
  return(contribution);
}
static MagickBooleanType HorizontalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double x_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);  /* widen the filter when shrinking */
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /* One contribution window (2*support+3 entries) per worker thread. */
  contributions=AcquireContributionTLS((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    const Quantum
      *magick_restrict p;

    ContributionInfo
      *magick_restrict contribution;

    Quantum
      *magick_restrict q;

    ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Locate the window [start,stop) of source columns contributing to
      target column x and compute each column's filter weight.
    */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /* Read the full contribution window for every row; write one target
       column. */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy-through channel or masked pixel: take the single source
              pixel nearest the window centre instead of filtering.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each contribution by the source pixel's
          alpha and renormalize by the accumulated alpha (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionTLS(contributions);
  return(status);
}
static MagickBooleanType VerticalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double y_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);  /* widen the filter when shrinking */
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /* One contribution window (2*support+3 entries) per worker thread. */
  contributions=AcquireContributionTLS((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    const Quantum
      *magick_restrict p;

    ContributionInfo
      *magick_restrict contribution;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Locate the window [start,stop) of source rows contributing to target
      row y and compute each row's filter weight.
    */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /* Read the full contribution window for every column; write one target
       row. */
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy-through channel or masked pixel: take the single source
              pixel nearest the window centre instead of filtering.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each contribution by the source pixel's
          alpha and renormalize by the accumulated alpha (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionTLS(contributions);
  return(status);
}
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;

  FilterType
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    offset;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Identity geometry with no explicit filter: return a plain clone. */
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Acquire resize filter.  Default is Lanczos; Point when both factors
    are exactly 1.0; Mitchell for colormapped images, images with alpha,
    or when the pixel count grows.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->alpha_trait != UndefinedPixelTrait) ||
          ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path; fall through to CPU on failure. */
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
#endif
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
  /*
    Intermediate image for the two-pass resize: the first pass resizes the
    axis whose scale factor is larger.
  */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(DestroyImage(resize_image));
    }
  /*
    Resize image.  "span" is the combined progress-iteration count of both
    passes (columns of one pass plus rows of the other).
  */
  offset=0;
  if (x_factor > y_factor)
    {
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
        span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    x1;

  ssize_t
    *x_offset,
    y;

  PointInfo
    sample_offset;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Set the sampling offset, default is in the mid-point of sample regions.
  */
  sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
  {
    const char
      *value;

    value=GetImageArtifact(image,"sample:offset");
    if (value != (char *) NULL)
      {
        GeometryInfo
          geometry_info;

        MagickStatusType
          flags;

        /*
          Parse the offset geometry once; a second, discarded
          ParseGeometry() call here was redundant.
        */
        flags=ParseGeometry(value,&geometry_info);
        sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
        if ((flags & SigmaValue) != 0)
          sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
      }
  }
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Precompute, per target column, the source column it samples. */
  for (x1=0; x1 < (ssize_t) sample_image->columns; x1++)
    x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
    {
      ssize_t
        i;

      if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(sample_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          image_traits,
          traits;

        channel=GetPixelChannelChannel(sample_image,i);
        traits=GetPixelChannelTraits(sample_image,channel);
        image_traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (image_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
          image)+i],q);
      }
      q+=GetPixelChannels(sample_image);
    }
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          This body runs inside the OpenMP parallel region, so the shared
          progress counter must be incremented atomically (matches the
          pattern used by MagnifyImage and the resize filters).
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SampleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"
  CacheView
    *image_view,
    *scale_view;
  double
    alpha,
    pixel[CompositePixelChannel],
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector;
  Image
    *scale_image;
  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;
  PixelTrait
    scale_traits;
  PointInfo
    scale,
    span;
  ssize_t
    i;
  ssize_t
    n,
    number_rows,
    y;
  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
    {
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  All working buffers hold one channel-interleaved row of
    double-precision samples (MaxPixelChannels per pixel).
  */
  x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*x_vector));
  /* When no vertical scaling is needed, x_vector doubles as the scanline. */
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
      MaxPixelChannels*sizeof(*scanline));
  scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
    MaxPixelChannels*sizeof(*scale_scanline));
  y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*y_vector));
  if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
      (x_vector == (double *) NULL) || (y_vector == (double *) NULL))
    {
      /* scanline is released only when it is a distinct buffer. */
      if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
        scanline=(double *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (double *) NULL)
        scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
      if (x_vector != (double *) NULL)
        x_vector=(double *) RelinquishMagickMemory(x_vector);
      if (y_vector != (double *) NULL)
        y_vector=(double *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.  span tracks the remaining fraction of the current
    destination row/column; scale tracks the remaining fraction of the
    current source row/column.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns*
    sizeof(*y_vector));
  n=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.  Alpha-blended channels are premultiplied so
          that averaging weights colors by coverage.
        */
        p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
          exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            break;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
            {
              p+=GetPixelChannels(image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            alpha=QuantumScale*GetPixelAlpha(image,p);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & BlendPixelTrait) == 0)
              {
                x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                continue;
              }
            x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
          }
          p+=GetPixelChannels(image);
        }
      }
    else
      {
        /*
          Scale Y direction.  Accumulate whole source rows that fall entirely
          inside this destination row into y_vector.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
                exception);
              if (p == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                  {
                    p+=GetPixelChannels(image);
                    continue;
                  }
                if (image->alpha_trait != UndefinedPixelTrait)
                  alpha=QuantumScale*GetPixelAlpha(image,p);
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                {
                  PixelChannel channel = GetPixelChannelChannel(image,i);
                  PixelTrait traits = GetPixelChannelTraits(image,channel);
                  if ((traits & BlendPixelTrait) == 0)
                    {
                      x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                      continue;
                    }
                  x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
                }
                p+=GetPixelChannels(image);
              }
              number_rows++;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              y_vector[x*GetPixelChannels(image)+i]+=scale.y*
                x_vector[x*GetPixelChannels(image)+i];
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        /* Blend in the partial source row straddling the row boundary. */
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
              exception);
            if (p == (const Quantum *) NULL)
              {
                status=MagickFalse;
                break;
              }
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                {
                  p+=GetPixelChannels(image);
                  continue;
                }
              if (image->alpha_trait != UndefinedPixelTrait)
                alpha=QuantumScale*GetPixelAlpha(image,p);
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                if ((traits & BlendPixelTrait) == 0)
                  {
                    x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                    continue;
                  }
                x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
              }
              p+=GetPixelChannels(image);
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /*
          Combine the accumulated rows with the partial-row contribution into
          the output scanline, then reset the accumulator.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
              x_vector[x*GetPixelChannels(image)+i];
            scanline[x*GetPixelChannels(image)+i]=pixel[i];
            y_vector[x*GetPixelChannels(image)+i]=0.0;
          }
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.  alpha un-premultiplies the
          blended channels before clamping back to Quantum.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
              x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    else
      {
        ssize_t
          t;
        /*
          Scale X direction.  t indexes the destination column currently
          being accumulated into pixel[].
        */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          pixel[i]=0.0;
        next_column=MagickFalse;
        span.x=1.0;
        t=0;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          /* Distribute this source pixel across one or more destination
             columns. */
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                  pixel[i]=0.0;
                t++;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(image,channel);
              if (traits == UndefinedPixelTrait)
                continue;
              pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
              scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
            }
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                    pixel[i]=0.0;
                  next_column=MagickFalse;
                  t++;
                }
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
              span.x-=scale.x;
            }
        }
        /* Flush any remaining fraction of the final source column. */
        if (span.x > 0)
          {
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
          }
        if ((next_column == MagickFalse) && (t < (ssize_t) scale_image->columns))
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scale_scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
              scale_scanline[x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.
  */
  y_vector=(double *) RelinquishMagickMemory(y_vector);
  scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(double *) RelinquishMagickMemory(scanline);
  x_vector=(double *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5
  char
    filename[MagickPathExtent],
    value[MagickPathExtent];
  const char
    *name;
  Image
    *clone_image,
    *thumbnail_image;
  ssize_t
    x_factor,
    y_factor;
  struct stat
    attributes;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Fix: guard against division by zero in the factor computations below. */
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  x_factor=(ssize_t) image->columns/columns;
  y_factor=(ssize_t) image->rows/rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  /* Fix: CloneImage() can fail; the original dereferenced the result
     unconditionally. */
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Progressively reduce oversized sources: a cheap point sample down to ~4x
    the target, then a box-filter resize to ~2x, before the final
    high-quality resize.
  */
  if ((x_factor > 4) && (y_factor > 4))
    {
      thumbnail_image=SampleImage(clone_image,4*columns,4*rows,exception);
      if (thumbnail_image != (Image *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          clone_image=thumbnail_image;
        }
    }
  if ((x_factor > 2) && (y_factor > 2))
    {
      thumbnail_image=ResizeImage(clone_image,2*columns,2*rows,BoxFilter,
        exception);
      if (thumbnail_image != (Image *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          clone_image=thumbnail_image;
        }
    }
  thumbnail_image=ResizeImage(clone_image,columns,rows,image->filter ==
    UndefinedFilter ? LanczosSharpFilter : image->filter,exception);
  /* Fix: release the intermediate image; the original leaked clone_image. */
  clone_image=DestroyImage(clone_image);
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  /*
    Attach the freedesktop.org thumbnail properties (Thumb::URI, MTime, Size,
    Mimetype, dimensions, page count).
  */
  (void) DeleteImageProperty(thumbnail_image,"comment");
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  GetPathComponent(image->magick_filename,TailPath,filename);
  (void) CopyMagickString(value,filename,MagickPathExtent);
  /*
    Fix: only read `attributes` when GetPathAttributes() succeeded.  The
    original also formatted attributes.st_mtime into `value` unconditionally
    (reading an uninitialized struct on stat failure) and then immediately
    overwrote `value` below, so that dead store is removed.
  */
  if ( GetPathAttributes(image->filename,&attributes) != MagickFalse )
    (void) FormatImageProperty(thumbnail_image,"Thumb::MTime","%.20g",(double)
      attributes.st_mtime);
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL,
    exception);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Width","%.20g",
    (double) image->magick_columns);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Height","%.20g",
    (double) image->magick_rows);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Document::Pages","%.20g",
    (double) GetImageListLength(image));
  return(thumbnail_image);
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * NOTE: *y is normalized in place as a side effect of the carry handling.
 * Returns 1 when the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds from y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Push any excess (strictly more than one second) of microseconds
   * back into the seconds field of y. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* The microsecond difference is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff x precedes (the normalized) y in whole seconds. */
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-4 (25-point) 3D wave-equation stencil benchmark.
 * Usage: prog Nx Ny Nz Nt — interior grid size plus the time-step count. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /*
    Fix: the original read Nx/Ny/Nz (argc <= 3) or Nt (argc <= 4)
    uninitialized; require all four arguments instead.
  */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+8;  /* +8 adds the 4-cell halo on each side */
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);
  /* Allocate the two time planes (A[0], A[1]) and the coefficient grid.
     Fix: roc2 was malloc'd with sizeof(double**) and then immediately
     re-assigned, leaking the first allocation; allocate it once. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations $reset_tile_sizes
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /*
    Initialize the grids.  Fix: zero-fill everything first — the original
    never wrote index 0 of any dimension and never initialized A[1] at all,
    yet the stencil reads both (undefined behavior).  The original rand()
    sequence is preserved by keeping the 1..N-1 random fill unchanged.
  */
  for (i = 0; i < Nz; i++)
    for (j = 0; j < Ny; j++)
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 0.0;
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 0.0;
      }
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* Order-4 central finite-difference coefficients. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            /* Leapfrog update: the (t+1) plane is read (previous value) and
               overwritten in place. */
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Checksum of the final plane so the compiler cannot elide the work. */
  double total = 0.0;
  for (i = 0; i < Nz; ++i) {
    for (j = 0; j < Ny; ++j) {
      for (k = 0; k < Nx; ++k) {
        total += A[Nt%2][i][j][k];
      }
    }
  }
  printf("Sum(final): %e\n", total);
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  return 0;
}
|
api2.c | // RUN: %libomp-compile-and-run
// RUN: %libomp-run | %python %S/check.py -c 'CHECK' %s
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#define XSTR(x) #x
#define STR(x) XSTR(x)
#define streqls(s1, s2) (!strcmp(s1, s2))
#define check(condition) \
if (!(condition)) { \
fprintf(stderr, "error: %s: %d: %s\n", __FILE__, __LINE__, \
STR(condition)); \
exit(1); \
}
#if defined(_WIN32)
#define snprintf _snprintf
#endif
#define BUFFER_SIZE 1024
int main(int argc, char** argv) {
  char buf[BUFFER_SIZE];
  size_t needed, length;
  /* Short-specifier affinity format (OpenMP 5.0 field escapes). */
  const char* format = "tl:%L tn:%n nt:%N an:%a";
  /* Same fields expressed with the long-name %{...} specifier syntax. */
  const char* second_format = "nesting_level:%{nesting_level} thread_num:%{thread_num} num_threads:%{num_threads} ancestor_tnum:%{ancestor_tnum}";
  length = strlen(format);
  omp_set_affinity_format(format);
  /* The return value is the number of characters required regardless of the
     buffer size actually supplied. */
  needed = omp_get_affinity_format(buf, BUFFER_SIZE);
  check(streqls(buf, format));
  check(needed == length)
  // Check that it is truncated properly
  omp_get_affinity_format(buf, 5);
  check(streqls(buf, "tl:%"));
  #pragma omp parallel
  {
    char my_buf[512];
    char supposed[512];
    int tl, tn, nt, an;
    size_t needed, needed2;
    /* Gather the values the format specifiers should expand to. */
    tl = omp_get_level();
    tn = omp_get_thread_num();
    nt = omp_get_num_threads();
    an = omp_get_ancestor_thread_num(omp_get_level()-1);
    /* omp_capture_affinity(..., NULL) uses the format installed above. */
    needed = omp_capture_affinity(my_buf, 512, NULL);
    needed2 = (size_t)snprintf(supposed, 512, "tl:%d tn:%d nt:%d an:%d", tl, tn, nt, an);
    check(streqls(my_buf, supposed));
    check(needed == needed2);
    // Check that it is truncated properly
    supposed[4] = '\0';
    omp_capture_affinity(my_buf, 5, NULL);
    check(streqls(my_buf, supposed));
    /* An explicit format argument overrides the installed format. */
    needed = omp_capture_affinity(my_buf, 512, second_format);
    needed2 = (size_t)snprintf(supposed, 512, "nesting_level:%d thread_num:%d num_threads:%d ancestor_tnum:%d", tl, tn, nt, an);
    check(streqls(my_buf, supposed));
    check(needed == needed2);
    // Check that it is truncated properly
    supposed[25] = '\0';
    omp_capture_affinity(my_buf, 26, second_format);
    check(streqls(my_buf, supposed));
  }
  /* Output checked against the CHECK patterns below by the test harness. */
  #pragma omp parallel num_threads(4)
  {
    omp_display_affinity(NULL);
    omp_display_affinity(second_format);
  }
  return 0;
}
// CHECK: num_threads=4 tl:[0-9]+ tn:[0-9]+ nt:[0-9]+ an:[0-9]+
// CHECK: num_threads=4 nesting_level:[0-9]+ thread_num:[0-9]+ num_threads:[0-9]+ ancestor_tnum:[0-9]+
|
from_json_check_result_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_FROM_JSON_CHECK_RESULT_PROCESS_H_INCLUDED )
#define KRATOS_FROM_JSON_CHECK_RESULT_PROCESS_H_INCLUDED
// System includes
// External includes
// Project includes
#include "processes/process.h"
#include "includes/model_part.h"
#include "includes/kratos_parameters.h"
#include "utilities/result_dabatase.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class FromJSONCheckResultProcess
* @ingroup KratosCore
* @brief This class is used in order to check results using a json file containing the solution a given model part with a certain frequency
 * @details This stores the database in a class named ResultDatabase, which uses Table to store the information and is therefore able to interpolate results
* @author Vicente Mataix Ferrandiz
*/
class KRATOS_API(KRATOS_CORE) FromJSONCheckResultProcess
: public Process
{
public:
///@name Type Definitions
///@{
/// Pointer definition of FromJSONCheckResultProcess
KRATOS_CLASS_POINTER_DEFINITION(FromJSONCheckResultProcess);
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( CORRECT_RESULT ); /// This flag is used in order to check that the result is correct
KRATOS_DEFINE_LOCAL_FLAG( HISTORICAL_VALUE ); /// This flag is used in order to check if the values are historical
KRATOS_DEFINE_LOCAL_FLAG( CHECK_ONLY_LOCAL_ENTITIES ); /// This flag is used in order to check only local entities
KRATOS_DEFINE_LOCAL_FLAG( NODES_CONTAINER_INITIALIZED ); /// This flag is used in order to check that nodes container are initialized
KRATOS_DEFINE_LOCAL_FLAG( ELEMENTS_CONTAINER_INITIALIZED ); /// This flag is used in order to check that elements container are initialized
KRATOS_DEFINE_LOCAL_FLAG( NODES_DATABASE_INITIALIZED ); /// This flag is used in order to check that nodes database are initialized
KRATOS_DEFINE_LOCAL_FLAG( ELEMENTS_DATABASE_INITIALIZED ); /// This flag is used in order to check that elements database are initialized
/// Containers definition
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
/// The node type definition
typedef Node<3> NodeType;
/// The definition of the index type
typedef std::size_t IndexType;
/// The definition of the sizetype
typedef std::size_t SizeType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor.
* @param rModel The model where the where the simulation is performed
* @param ThisParameters The parameters of configuration
*/
FromJSONCheckResultProcess(
Model& rModel,
Parameters ThisParameters = Parameters(R"({})")
);
/**
* @brief Default constructor.
* @param rModelPart The model part where the simulation is performed
* @param ThisParameters The parameters of configuration
*/
FromJSONCheckResultProcess(
ModelPart& rModelPart,
Parameters ThisParameters = Parameters(R"({})")
);
/// Destructor.
virtual ~FromJSONCheckResultProcess() {}
///@}
///@name Operators
///@{
    /// Functor-call shorthand; delegates to Execute().
    void operator()()
    {
        Execute();
    }
///@}
///@name Operations
///@{
/**
* @brief This function is designed for being called at the beginning of the computations right after reading the model and the groups
*/
void ExecuteInitialize() override;
/**
* @brief This function will be executed at every time step AFTER performing the solve phase
*/
void ExecuteFinalizeSolutionStep() override;
/**
* @brief This function is designed for being called at the end of the computations
*/
void ExecuteFinalize() override;
/**
* @brief This function is designed for being called after ExecuteInitialize ONCE to verify that the input is correct.
*/
int Check() override;
/**
* @brief This function returns if the result is correct
* @return If the result is correct
*/
    bool IsCorrectResult()
    {
        // CORRECT_RESULT is maintained by the checking steps of this process
        return this->Is(CORRECT_RESULT);
    }
///@}
///@name Input and output
///@{
/// Turn back information as a string.
    /// Turn back information as a string.
    std::string Info() const override
    {
        return "FromJSONCheckResultProcess";
    }
/// Print information about this object.
    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << "FromJSONCheckResultProcess";
    }
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
protected:
///@name Protected Operations
///@{
/**
* @brief This initializes the databases
*/
void InitializeDatabases();
/**
* @brief This method fills the list of variables
* @param rNodalVariablesNames The names of the nodal variables
* @param rGPVariablesNames The names of the GP variables
*/
void FillVariablesList(
const std::vector<std::string>& rNodalVariablesNames,
const std::vector<std::string>& rGPVariablesNames
);
/**
* @brief This method checks if a flag is active in a given entity
* @param rEntity The entity to check
     * @param pFlag The pointer to the flag to check
*/
    template<class TEntity>
    bool CheckFlag(
        const TEntity& rEntity,
        const Flags* pFlag
        )
    {
        // A null flag pointer means "no filtering": every entity passes
        if (pFlag != nullptr) {
            if (rEntity.IsNot(*pFlag)) {
                return false;
            }
        }
        return true;
    }
/**
* @brief This method checks the results
* @param ValueEntity The value on the entity
* @param ValueJSON The reference value from the JSON
*/
bool CheckValues(
const double ValueEntity,
const double ValueJSON
);
/**
* @brief This returns a message in case of fail
* @param EntityId The Kratos node or element to check
* @param rEntityType The type of the entity
* @param ValueEntity The value on the entity
* @param ValueJSON The reference value from the json
* @param rVariableName The name of the variable
* @param ComponentIndex The component index
* @param GPIndex The GP index
*/
void FailMessage(
const IndexType EntityId,
const std::string& rEntityType,
const double ValueEntity,
const double ValueJSON,
const std::string& rVariableName,
const int ComponentIndex = -1,
const int GPIndex = -1
);
/**
* @brief This method check the nodal values
* @param rCheckCounter The check counter
* @tparam THistorical If the value is historical or not
*/
    template<bool THistorical>
    void CheckNodeValues(IndexType& rCheckCounter)
    {
        // Get time
        const double time = mrModelPart.GetProcessInfo().GetValue(TIME);
        // Node database
        const auto& r_node_database = GetNodeDatabase();
        // Iterate over nodes
        const auto& r_nodes_array = GetNodes();
        const auto it_node_begin = r_nodes_array.begin();
        // Auxiliary check counter (MSVC does not accept references in OpenMP reduction clauses)
        IndexType check_counter = rCheckCounter;
        // Scalar variables: compare each nodal value against the database value at the current time
        for (auto& p_var_double : mpNodalVariableDoubleList) {
            const auto& r_var_database = r_node_database.GetVariableData(*p_var_double);
            #pragma omp parallel for reduction(+:check_counter)
            for (int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
                auto it_node = it_node_begin + i;
                const double result = GetValue<THistorical>(it_node, p_var_double);
                const double reference = r_var_database.GetValue(i, time);
                if (!CheckValues(result, reference)) {
                    FailMessage(it_node->Id(), "Node", result, reference, p_var_double->Name());
                    check_counter += 1;
                }
            }
        }
        // Array (3-component) variables: each component is compared independently
        for (auto& p_var_array : mpNodalVariableArrayList) {
            const auto& r_var_database = r_node_database.GetVariableData(*p_var_array);
            #pragma omp parallel for reduction(+:check_counter)
            for (int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
                auto it_node = it_node_begin + i;
                const auto& r_entity_database = r_var_database.GetEntityData(i);
                const array_1d<double, 3>& r_result = GetValue<THistorical>(it_node, p_var_array);
                for (IndexType i_comp = 0; i_comp < 3; ++i_comp) {
                    const double reference = r_entity_database.GetValue(time, i_comp);
                    if (!CheckValues(r_result[i_comp], reference)) {
                        FailMessage(it_node->Id(), "Node", r_result[i_comp], reference, p_var_array->Name());
                        check_counter += 1;
                    }
                }
            }
        }
        // Vector variables: component count is taken from the nodal result itself
        for (auto& p_var_vector : mpNodalVariableVectorList) {
            const auto& r_var_database = r_node_database.GetVariableData(*p_var_vector);
            #pragma omp parallel for reduction(+:check_counter)
            for (int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
                auto it_node = it_node_begin + i;
                const auto& r_entity_database = r_var_database.GetEntityData(i);
                const Vector& r_result = GetValue<THistorical>(it_node, p_var_vector);
                for (IndexType i_comp = 0; i_comp < r_result.size(); ++i_comp) {
                    const double reference = r_entity_database.GetValue(time, i_comp);
                    if (!CheckValues(r_result[i_comp], reference)) {
                        FailMessage(it_node->Id(), "Node", r_result[i_comp], reference, p_var_vector->Name());
                        check_counter += 1;
                    }
                }
            }
        }
        // Save the reference
        rCheckCounter = check_counter;
    }
/**
* @brief This method check the GP values
* @param rCheckCounter The check counter
*/
void CheckGPValues(IndexType& rCheckCounter);
/**
* @brief
*/
SizeType SizeDatabase(
const Parameters& rResults,
const NodesArrayType& rNodesArray,
const ElementsArrayType& rElementsArray
);
/**
* @brief
*/
void FillDatabase(
const Parameters& rResults,
const NodesArrayType& rNodesArray,
const ElementsArrayType& rElementsArray,
const SizeType NumberOfGP
);
/**
* @brief Returns the identifier/key for saving nodal results in the json this can be either the node Id or its coordinates
* @details The coordinates can be used to check the nodal results in MPI
* @param rNode The Kratos node to get the identifier for
*/
std::string GetNodeIdentifier(NodeType& rNode);
/**
* @brief This method returns the nodes of the model part
* @return The nodes of the model part
*/
NodesArrayType& GetNodes(const Flags* pFlag = nullptr);
/**
* @brief This method returns the elements of the model part
* @return The elements of the model part
*/
ElementsArrayType& GetElements(const Flags* pFlag = nullptr);
/**
* @brief This method computes the relevant digits to take into account
*/
std::size_t ComputeRelevantDigits(const double Value);
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
*/
const Parameters GetDefaultParameters() const override;
///@}
///@name Protected Access
///@{
/**
* @brief This method returns the model part
* @return The model part of the problem
*/
const ModelPart& GetModelPart() const;
/**
* @brief This method returns the settings
* @return The settings of the problem
*/
const Parameters GetSettings() const;
/**
* @brief This method returns the Nodes database. If not initialized it will try initialize again
* @return The nodes database
*/
const ResultDatabase& GetNodeDatabase();
/**
* @brief This method returns the GP database. If not initialized it will try initialize again
* @return The GP database
*/
const ResultDatabase& GetGPDatabase();
///@}
///@name Protected LifeCycle
///@{
/// Protected constructor with modified default settings to be defined by derived class.
FromJSONCheckResultProcess(ModelPart& rModelPart, Parameters Settings, Parameters DefaultSettings);
///@}
private:
///@name Member Variables
///@{
/* Model part and different settings */
ModelPart& mrModelPart; /// The main model part
Parameters mThisParameters; /// The parameters (can be used for general pourposes)
/* Additional values */
double mFrequency; /// The check frequency
double mRelativeTolerance; /// The relative tolerance
double mAbsoluteTolerance; /// The absolute tolerance
SizeType mRelevantDigits; /// This is the number of relevant digits
/* Counters */
double mTimeCounter = 0.0; /// A time counter
/* The entities of the containers */
NodesArrayType mNodesArray; /// The nodes of study
ElementsArrayType mElementsArray; /// The elements of study
/* The vectors storing the variables of study */
std::vector<const Variable<double>*> mpNodalVariableDoubleList; /// The scalar variable list to compute
std::vector<const Variable<array_1d<double,3>>*> mpNodalVariableArrayList; /// The array variable list to compute
std::vector<const Variable<Vector>*> mpNodalVariableVectorList; /// The vector variable list to compute
std::vector<const Variable<double>*> mpGPVariableDoubleList; /// The scalar variable list to compute
std::vector<const Variable<array_1d<double,3>>*> mpGPVariableArrayList; /// The array variable list to compute
std::vector<const Variable<Vector>*> mpGPVariableVectorList; /// The vector variable list to compute
/* The databases which store the values */
ResultDatabase mDatabaseNodes; /// The database containing the information to compare the results for the nodes
ResultDatabase mDatabaseGP; /// The database containing the information to compare the results for the Gauss Points
///@name Private Operations
///@{
/**
* @brief This gets the double value
* @param itNode Node iterator
* @param pVariable The double variable
* @tparam THistorical If the value is historical or not
*/
template<bool THistorical>
double GetValue(
NodesArrayType::const_iterator& itNode,
const Variable<double>* pVariable
);
/**
* @brief This gets the array value
* @param itNode Node iterator
* @param pVariable The array variable
* @tparam THistorical If the value is historical or not
*/
template<bool THistorical>
const array_1d<double, 3>& GetValue(
NodesArrayType::const_iterator& itNode,
const Variable<array_1d<double, 3>>* pVariable
);
/**
* @brief This gets the vector value
* @param itNode Node iterator
* @param pVariable The vector variable
* @tparam THistorical If the value is historical or not
*/
template<bool THistorical>
const Vector& GetValue(
NodesArrayType::const_iterator& itNode,
const Variable<Vector>* pVariable
);
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
FromJSONCheckResultProcess& operator=(FromJSONCheckResultProcess const& rOther);
///@}
}; // Class FromJSONCheckResultProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
FromJSONCheckResultProcess& rThis);
/// output stream function
/// @brief Streams a human-readable description of the process: header via
/// PrintInfo(), then a blank line, then the data via PrintData().
inline std::ostream& operator << (std::ostream& rOStream,
                const FromJSONCheckResultProcess& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_FROM_JSON_CHECK_RESULT_PROCESS_H_INCLUDED defined
|
krb5-18_fmt_plug.c | /*
* KRB5 - Enctype 18 (aes256-cts-hmac-sha1-96) cracker patch for JtR
* Created on August of 2012 by Mougey Camille (CEA/DAM) & Lalet Pierre (CEA/DAM)
*
* This format is one of formats saved in KDC database and used during the authentication part
*
* This software is Copyright (c) 2012, Mougey Camille (CEA/DAM)
* Lalet Pierre (CEA/DAM)
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format :
* - user:$krb18$REALMname$hash
* - user:REALMname$hash
*
* Format rewritten Dec, 2014, without use of -lkrb5, by JimF. Now we use 'native' JtR
* pbkdf2-hmac-sha1() and simple call to 2 AES limb encrypt for entire process. Very
 * simple, and 10x faster, and no obscure -lkrb5 dependency
*/
#if AC_BUILT
#include "autoconfig.h"
#endif
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5_18;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5_18);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "simd-intrinsics.h"
#include "pbkdf2_hmac_sha1.h"
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32
#endif
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "krb5-18"
#define FORMAT_NAME "Kerberos 5 db etype 18"
#define FORMAT_TAG "$krb18$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#if SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " AES"
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR " AES"
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 32
#define BINARY_ALIGN 4
#define SALT_SIZE CIPHERTEXT_LENGTH
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Self-test vectors: the same hash once without and once with the
 * "$krb18$" tag prefix, to exercise split()'s canonicalization. */
static struct fmt_tests kinit_tests[] = {
	{"OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"},
	{FORMAT_TAG "OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26",
	    "password"},
	{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1]; /* candidate passwords, one per key slot */
static char saved_salt[SALT_SIZE+1];            /* current salt (REALM+principal string) */
static ARCH_WORD_32 (*crypt_out)[16];           /* per-key output; only the first BINARY_SIZE bytes are compared */
/*
 * One-time format setup: scale the key-slot counts by the OpenMP thread
 * count (max scaled further by OMP_SCALE), then allocate the per-key
 * input and output buffers.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/*
 * Accept "[$krb18$]SALT$HEX" where SALT is the REALM+principal string of at
 * most SALT_SIZE bytes and HEX is exactly CIPHERTEXT_LENGTH lowercase hex
 * digits.
 *
 * Fix: the salt length is now measured from after the optional tag.  The
 * original measured from the start of the string, so a tagged hash was
 * allowed TAG_LENGTH fewer salt characters than its untagged twin --
 * meaning an untagged hash with a long salt could pass valid() but its
 * canonical (split) form would be rejected.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *salt, *p, *q;

	salt = ciphertext;
	if (!strncmp(salt, FORMAT_TAG, TAG_LENGTH))
		salt += TAG_LENGTH;
	p = strchr(salt, '$');	/* end of salt (strchr: we search one char) */
	if (p == NULL)
		return 0;
	if (p - salt > SALT_SIZE)	/* check salt length, tag excluded */
		return 0;
	q = ++p;
	while (atoi16l[ARCH_INDEX(*q)] != 0x7F)
		q++;
	/* string must end exactly after CIPHERTEXT_LENGTH hex digits */
	return !*q && q - p == CIPHERTEXT_LENGTH;
}
/*
 * Canonicalize a hash: ensure the "$krb18$" tag is present.  Untagged
 * input is copied (bounded, NUL-terminated) after the tag into a static
 * buffer.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + SALT_SIZE + 1];

	/* already tagged: return as-is */
	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) == 0)
		return ciphertext;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpyn(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + SALT_SIZE + 1);
	return out;
}
/*
 * Extract the salt (REALM+principal, the part between the tag and the next
 * '$') from a canonical hash.  Returns a static, zero-padded buffer.
 * The caller guarantees the tag is present (split() ran first).
 */
static void *get_salt(char *ciphertext)
{
	static char out[SALT_SIZE+1];
	char *start, *end;
	size_t len;

	memset(out, 0, sizeof(out));
	start = ciphertext + TAG_LENGTH;	/* skip "$krb18$" */
	end = strchr(start, '$');		/* salt runs up to the next '$' */
	len = (size_t)(end - start);
	memcpy(out, start, len);
	out[len] = 0;
	return out;
}
/*
 * Set the current salt.  `salt` comes from get_salt() and is guaranteed by
 * valid()/split() to fit in saved_salt (at most SALT_SIZE chars plus NUL),
 * so the unbounded strcpy is safe here.
 */
static void set_salt(void *salt)
{
	strcpy(saved_salt, salt);
}
/*
 * Decode the CIPHERTEXT_LENGTH hex digits that follow the salt separator
 * into BINARY_SIZE raw bytes.  Returns a lazily tiny-allocated static
 * buffer (overwritten by subsequent calls).
 */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *hex;
	int byte;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	hex = ciphertext;
	if (!strncmp(hex, FORMAT_TAG, TAG_LENGTH))
		hex += TAG_LENGTH;
	hex = strstr(hex, "$") + 1;	/* jump past the salt */

	for (byte = 0; byte < BINARY_SIZE; byte++, hex += 2)
		out[byte] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		            atoi16[ARCH_INDEX(hex[1])];

	return out;
}
/*
 * Hash all candidate keys against the current salt:
 *   1. key = PBKDF2-HMAC-SHA1(password, salt, 4096 iterations, 32 bytes)
 *   2. AES-256-encrypt a fixed 16-byte constant with that key, then
 *      encrypt the result again (CBC-style chaining), producing the
 *      32 bytes compared against the stored hash.
 * NOTE(review): the string literal is presumably the RFC 3962 n-folded
 * "kerberos" constant -- confirm against a krb5 reference.
 * The SIMD path derives SSE_GROUP_SZ_SHA1 keys per PBKDF2 call.
 */
static int crypt_all(int *pcount, struct db_salt *_salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char key[32], i;
		AES_KEY aeskey;
#ifdef SSE_GROUP_SZ_SHA1
		/* SIMD path: gather a group of keys, one PBKDF2 call for all */
		ARCH_WORD_32 Key[SSE_GROUP_SZ_SHA1][32/4];
		int lens[SSE_GROUP_SZ_SHA1];
		unsigned char *pin[SSE_GROUP_SZ_SHA1];
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = Key[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens, (const unsigned char*)saved_salt, strlen(saved_salt), 4096, &(x.poutc), 32, 0);
#else
		/* scalar path: derive one key */
		pbkdf2_sha1((const unsigned char*)saved_key[index], strlen(saved_key[index]), (const unsigned char*)saved_salt, strlen(saved_salt), 4096, key, 32, 0);
#endif
		i=0;
#ifdef SSE_GROUP_SZ_SHA1
		/* loop over each key of the SIMD group; scalar path runs once */
		for (; i < SSE_GROUP_SZ_SHA1; ++i) {
		memcpy(key, Key[i], 32);
#endif
		AES_set_encrypt_key(key, 256, &aeskey);
		AES_encrypt((unsigned char*)"kerberos{\x9b[+\x93\x13+\x93", (unsigned char*)(crypt_out[index+i]), &aeskey);
		/* second limb: re-encrypt the first block into bytes 16..31 */
		AES_encrypt((unsigned char*)(crypt_out[index+i]), (unsigned char*)&crypt_out[index+i][4], &aeskey);
#ifdef SSE_GROUP_SZ_SHA1
		}
#endif
	}
	return count;
}
/*
 * Quick scan: does any computed hash match the first 32 bits of the
 * reference binary?  cmp_one() confirms the full BINARY_SIZE afterwards.
 * (Without OpenMP and with MAX_KEYS_PER_CRYPT == 1 the loop header
 * disappears and only index 0 is checked.)
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (crypt_out[index][0] == *(ARCH_WORD_32*)binary)
			return 1;
	return 0;
}
/* Full BINARY_SIZE comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/*
 * Nothing further to verify: cmp_one() already compared the full
 * BINARY_SIZE, so any candidate reaching this point is a match.
 */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored (possibly truncated) candidate for this key slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Hash-table lookups of increasing width over the first 32 bits of the
 * computed binary (PH_MASK_0 .. PH_MASK_6 select the table size). */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * JtR format descriptor: format parameters (sizes, flags, test vectors)
 * followed by the method table wiring the callbacks defined above into the
 * cracker core.  Positional initializer -- the order is fixed by
 * struct fmt_main in formats.h.
 */
struct fmt_main fmt_krb5_18 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		kinit_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact,
	}
};
#endif /* plugin stanza */
|
matmult-mpi.c | /*
* Copyright (c) 2014-2017, Sebastien Vincent
*
* Distributed under the terms of the BSD 3-clause License.
* See the LICENSE file for details.
*/
/**
* \file matmult-mpi.c
* \brief Matrix multiplication in C/MPI.
* \author Sebastien Vincent
* \date 2018
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/**
* \brief Default row size.
*/
static const size_t DEFAULT_ROW_SIZE = 1024;
/**
* \brief Default column size.
*/
static const size_t DEFAULT_COLUMN_SIZE = 1024;
/**
* \struct configuration
* \brief Configuration.
*/
struct configuration
{
/**
* \brief Row size.
*/
size_t m;
/**
 * \brief Column size.
*/
size_t n;
/**
* \brief Print input and output matrixes.
*/
int print_matrix;
/**
* \brief Number of threads.
*/
size_t threads;
};
/**
 * \brief Initializes the matrixes with random values.
 * \param mat1 first matrix.
 * \param mat2 second matrix.
 * \param m row size of the matrix.
 * \param n column size of the matrix.
 */
void mat_init(int* mat1, int* mat2, size_t m, size_t n)
{
  const size_t total = m * n;

  /* keep the original rand() call order: mat1 then mat2 for each cell */
  for(size_t idx = 0 ; idx < total ; idx++)
  {
    mat1[idx] = rand();
    mat2[idx] = rand();
  }
}
/**
 * \brief Print the matrix content on stdout.
 * \param mat the matrix (row-major, m rows of n columns).
 * \param m row size of the matrix.
 * \param n column size of the matrix.
 */
void mat_print(int* mat, size_t m, size_t n)
{
  for(size_t i = 0 ; i < m ; i++)
  {
    for(size_t j = 0 ; j < n ; j++)
    {
      /* row-major element (i, j): the row stride is the column count n.
       * (The original indexed mat[i * m + j], which reads the wrong
       * cells -- and out of bounds -- whenever m != n.) */
      fprintf(stdout, "%d ", mat[i * n + j]);
    }
    fprintf(stdout, "\n");
  }
}
/**
 * \brief Performs multiplication of matrixes distributed over MPI ranks.
 * \param mat1 first matrix (scattered: each rank gets m/world_size rows).
 * \param mat2 second matrix (broadcast to every rank).
 * \param result result matrix (gathered on rank 0).
 * \param m row size of first matrix.
 * \param n column size of first matrix.
 * \param w row size of second matrix (must equal n).
 * \param rank MPI rank.
 * \param world_size Total number of MPI nodes.
 * \param threads number of threads to use (OpenMP only).
 * \return 0 if success, -1 if matrixes cannot be multiplied or on
 * allocation failure.
 */
int mat_mult_mpi(int* mat1, int* mat2, int* result, size_t m, size_t n,
    size_t w, size_t rank, size_t world_size, size_t threads)
{
  int* res = NULL;

  (void)rank;
  (void)threads;

  /* reject incompatible shapes before allocating the partial buffer
   * (the original allocated first and leaked `res` on this path) */
  if(n != w)
  {
    return -1;
  }

  res = malloc(sizeof(int) * (m * n) / world_size);
  if(!res)
  {
    return -1;
  }

  /* transmit row to each process */
  /* NOTE(review): send and receive buffers alias on the root rank; MPI
   * formally requires MPI_IN_PLACE for that -- confirm against the target
   * MPI implementation */
  MPI_Scatter(mat1, (m * n) / world_size, MPI_INT, mat1, (m * n) / world_size,
      MPI_INT, 0 /* rank root */, MPI_COMM_WORLD);

  /* broadcast second matrix to other nodes */
  MPI_Bcast(mat2, m * n, MPI_INT, 0, MPI_COMM_WORLD);

  /* matrix multiply over the local block of m/world_size rows */
#if _OPENMP
  /* to set spread way, add to next line: proc_bind(spread) */
  #pragma omp parallel num_threads(threads)
#endif
  for(size_t i = 0 ; i < (m / world_size) ; i++)
  {
#if _OPENMP
    #pragma omp for schedule(static)
#endif
    for(size_t j = 0 ; j < n ; j++)
    {
      int tmp = 0;

      for(size_t k = 0 ; k < w ; k++)
      {
        tmp += mat1[i * w + k] * mat2[k * n + j];
      }
      /* row stride of the local result block is n columns (the original
       * wrote res[i * m + j], corrupting the output whenever m != n) */
      res[i * n + j] = tmp;
    }
  }

  /* collect the partial results on rank 0 */
  MPI_Gather(res, (m * n) / world_size, MPI_INT, result, (m * n) / world_size,
      MPI_INT, 0, MPI_COMM_WORLD);

  free(res);
  MPI_Barrier(MPI_COMM_WORLD);
  return 0;
}
/**
 * \brief Print help.
 * \param program program name.
 */
void print_help(const char* program)
{
  /* the usage string is assembled from adjacent literals, so the OpenMP
   * options only appear when compiled with OpenMP support */
  fprintf(stdout, "Usage: %s [-m row size] [-n column size] "
#ifdef _OPENMP
      "[-t thread_number]"
#endif
      "[-p] [-h]\n\n"
      " -h\t\tDisplay this help\n"
#ifdef _OPENMP
      " -t nb\t\tDefines number of threads to use\n"
#endif
      " -p\t\tPrint the input and output matrixes\n"
      " -m row\tDefine row size (default 1024)\n"
      " -n col\tDefine column size (default 1024)\n",
      program);
}
/**
 * \brief Parse command line.
 * \param argc number of arguments.
 * \param argv array of arguments.
 * \param configuration configuration parameters (filled on return).
 * \return 0 to exit with success, -1 to exit with error, otherwise continue.
 */
int parse_cmdline(int argc, char** argv,
    struct configuration* configuration)
{
  /*
   * h: print help and exit
   * p: print input and output matrixes
   * m: row size
   * n: column size
   * t: number of threads to use
   */
  static const char* options = "hpm:n:t:";
  int opt = 0;
  int print_matrix = 0;
  long m = DEFAULT_ROW_SIZE;
  long n = DEFAULT_COLUMN_SIZE;
  int threads = sysconf(_SC_NPROCESSORS_ONLN);
  int ret = 1;

  assert(configuration);

  while((opt = getopt(argc, argv, options)) != -1)
  {
    switch(opt)
    {
      case 'h':
        /* help */
        print_help(argv[0]);
        return 0;
      case 'p':
        print_matrix = 1;
        break;
      case 'm':
        m = atol(optarg);
        if(m < 2)
        {
          fprintf(stderr, "Bad argument for '-m' %ld\n", m);
          ret = -1;
        }
        break;
      case 'n':
        n = atol(optarg);
        if(n < 2)
        {
          fprintf(stderr, "Bad argument for '-n' %ld\n", n);
          ret = -1;
        }
        break;
      case 't':
        threads = atol(optarg);
        if(threads <= 0)
        {
          fprintf(stderr, "Bad argument for '-t': %s\n", optarg);
          /* -1 is the documented error value; the original set
           * EXIT_FAILURE (1) here, which the caller treats as
           * "continue", silently ignoring a bad -t argument */
          ret = -1;
        }
        break;
      default:
        fprintf(stderr, "Bad option (%c)\n", optopt);
        ret = -1;
        break;
    }
  }

  configuration->print_matrix = print_matrix;
  configuration->m = m;
  configuration->n = n;
#ifdef _OPENMP
  configuration->threads = threads;
#else
  /* without OpenMP the compute path is single-threaded regardless */
  configuration->threads = 1;
#endif

  return ret;
}
/**
 * \brief Entry point of the program.
 * \param argc number of arguments.
 * \param argv array of arguments.
 * \return EXIT_SUCCESS or EXIT_FAILURE.
 */
int main(int argc, char** argv)
{
  int* mat1 = NULL;
  int* mat2 = NULL;
  int* mat3 = NULL;
  size_t m = DEFAULT_ROW_SIZE;
  size_t n = DEFAULT_COLUMN_SIZE;
  size_t w = DEFAULT_COLUMN_SIZE;
  int print_matrix = 0;
  struct configuration config;
  double start = 0;
  double end = 0;
  int ret = 0;
  int world_size = 0;
  int world_rank = 0;
  char processor_name[MPI_MAX_PROCESSOR_NAME];
  int name_len = 0;

  /* parse options before MPI starts: 0 = -h was given, -1 = bad option */
  ret = parse_cmdline(argc, argv, &config);
  if(ret == 0)
  {
    exit(EXIT_SUCCESS);
  }
  else if(ret == -1)
  {
    exit(EXIT_FAILURE);
  }

  m = config.m;
  n = config.n;
  w = config.n;
  print_matrix = config.print_matrix;

  /* MPI initialization: with OpenMP, request serialized thread support */
#if _OPENMP
  int required = MPI_THREAD_SERIALIZED;
  int provided = 0;

  if(MPI_Init_thread(NULL, NULL, required, &provided) != MPI_SUCCESS)
  {
    fprintf(stderr, "Failed to initialize MPI.\n");
    exit(EXIT_FAILURE);
  }

  if(provided < required)
  {
    fprintf(stderr, "Failed to configure MPI thread.\n");
    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  }
#else
  if(MPI_Init(NULL, NULL) != MPI_SUCCESS)
  {
    fprintf(stderr, "Failed to initialize MPI.\n");
    exit(EXIT_FAILURE);
  }
#endif

  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Get_processor_name(processor_name, &name_len);

  /* the row count must divide evenly across ranks for Scatter/Gather */
  if(m % world_size)
  {
    if(world_rank == 0)
    {
      fprintf(stderr,
          "Matrix size (%zu) not divisible by number of processor (%d)\n",
          m * n,
          world_size);
    }
    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  }

  fprintf(stdout, "MPI from processor %s, rank %d out of %d\n",
      processor_name, world_rank, world_size);

  /* every rank allocates full-size buffers (Bcast needs all of mat2) */
  mat1 = malloc((m * n) * sizeof(int));
  mat2 = malloc((m * n) * sizeof(int));
  mat3 = malloc((m * n) * sizeof(int));

  if(!mat1 || !mat2 || !mat3)
  {
    perror("malloc");
    free(mat1);
    free(mat2);
    free(mat3);
    MPI_Finalize();
    exit(EXIT_FAILURE);
  }

  /* random initialization */
  srand(time(NULL));

  /* only rank 0 fills the inputs; they are distributed in mat_mult_mpi */
  if(world_rank == 0)
  {
    mat_init(mat1, mat2, m, n);

    if(print_matrix)
    {
      fprintf(stdout, "Matrix 1:\n");
      mat_print(mat1, m, n);
      fprintf(stdout, "Matrix 2:\n");
      mat_print(mat2, m, n);
    }

    fprintf(stdout, "Compute with %zu MPI node(s) with %zu thread(s) \n",
        (size_t)world_size, config.threads);
  }

  start = MPI_Wtime();

  if(mat_mult_mpi(mat1, mat2, mat3, m, n, w, world_rank, world_size,
        config.threads) == -1)
  {
    fprintf(stderr, "Matrixes cannot be multiplied\n");
    ret = EXIT_FAILURE;
  }
  else
  {
    end = MPI_Wtime();

    if(world_rank == 0)
    {
      fprintf(stdout, "Multiplication success: %f ms\n", (end - start) * 1000);

      if(print_matrix)
      {
        mat_print(mat3, m, n);
      }
    }
    ret = EXIT_SUCCESS;
  }

  /* free resources */
  free(mat1);
  free(mat2);
  free(mat3);
  MPI_Finalize();

  return ret;
}
|
bar.c | #include <stdio.h>
#include <omp.h>
/*
 * Micro-benchmark: measure the average cost of an OpenMP barrier by
 * timing n back-to-back barriers inside a parallel region, repeated for
 * 20 rounds.  Thread 1 reports the per-call time (so nothing is printed
 * when fewer than two threads run).
 */
int main(void)
{
	const int n = 10000;

	for (int round = 0; round < 20; round++) {
#pragma omp parallel
		{
			double start = omp_get_wtime();

			for (int k = 0; k < n; k++) {
#pragma omp barrier
			}

			double stop = omp_get_wtime();

			if (omp_get_thread_num() == 1)
				printf("round %d: %d barriers took %lf us per call\n",
				       round, n, 1e6 * (stop - start) / n);
		}
	}
	return 0;
}
|
GB_binop__lor_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__lor_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__lor_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int32)
// A*D function (colscale): GB (_AxD__lor_int32)
// D*A function (rowscale): GB (_DxB__lor_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int32)
// C=scalar+B GB (_bind1st__lor_int32)
// C=scalar+B' GB (_bind1st_tran__lor_int32)
// C=A+scalar GB (_bind2nd__lor_int32)
// C=A'+scalar GB (_bind2nd_tran__lor_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT32 || GxB_NO_LOR_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled in this generated file: LOR is not one of the accumulate
// operators this kernel is generated for (see the list below).
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// (auto-generated) The loop body lives in the included template,
// specialized by the GB_* macros defined earlier in this file.
GrB_Info GB (_Cdense_ewise3_noaccum__lor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// (auto-generated) B is pre-sliced into B_ntasks tasks via B_ek_slicing.
GrB_Info GB (_Cdense_accumB__lor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lor_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // note: unreachable -- the inner block above already returned
    // (harmless artifact of the code generator)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes its results directly into C->x
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes its results directly into C->x
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces, allocated/used inside the template
    // and released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// (auto-generated) GB_BINOP_FLIP is 0 for LOR (commutative), so only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__lor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lor_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x || Bx [p]) for every entry present in B (per the Bb bitmap).
GrB_Info GB (_bind1st__lor_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int32_t bij = Bx [p] ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] || y) for every entry present in A (per the Ab bitmap).
GrB_Info GB (_bind2nd__lor_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int32_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = Ax [pA] ;                     \
    Cx [pC] = ((x != 0) || (aij != 0)) ;        \
}
GrB_Info GB (_bind1st_tran__lor_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = Ax [pA] ;                     \
    Cx [pC] = ((aij != 0) || (y != 0)) ;        \
}
GrB_Info GB (_bind2nd_tran__lor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image-private.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
/*
  Typedef declaration.

  A MatrixInfo is a 2-D element cache backed by memory, an anonymous memory
  map, or a disk file, selected at acquisition time.
*/
struct _MatrixInfo
{
  CacheType
    type;                 /* MemoryCache, MapCache, or DiskCache */

  size_t
    columns,              /* matrix width, in elements */
    rows,                 /* matrix height, in elements */
    stride;               /* size of one element, in bytes */

  MagickSizeType
    length;               /* total storage size: columns*rows*stride bytes */

  MagickBooleanType
    mapped,               /* elements acquired via MapBlob(), not malloc */
    synchronize;          /* MAGICK_SYNCHRONIZE env: preallocate disk space */

  char
    path[MaxTextExtent];  /* path of the backing file (disk/map caches) */

  int
    file;                 /* backing file descriptor, or -1 if none */

  void
    *elements;            /* in-core element storage (memory/map caches) */

  SemaphoreInfo
    *semaphore;           /* serializes non-atomic seek+read/write pairs */

  size_t
    signature;            /* validity marker: MagickCoreSignature */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/*
  SIGBUS handler installed by SetMatrixExtent(): converts a bus error on the
  matrix cache (e.g. a mapping whose backing file could not be extended)
  into a fatal exception instead of an uncontrolled crash.
*/
static void MatrixSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
  Write `length` bytes from `buffer` to the matrix backing file starting at
  byte `offset`.  Uses pwrite() when available; otherwise serializes the
  non-atomic lseek()+write() pair under the matrix semaphore.  Returns the
  number of bytes actually written (may be short on error), or -1 if the
  initial seek fails.
*/
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* no pwrite(): seek+write must not interleave with other threads */
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* retry on EINTR; any other error ends the loop with a short count */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Ensure the matrix backing file covers at least `length` bytes.  Returns
  MagickTrue when the file already spans `length` or was extended
  successfully; MagickFalse on overflow or I/O failure.
*/
static MagickBooleanType SetMatrixExtent(
  MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /* reject lengths that do not round-trip through a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);
  /* grow the file by writing a single NUL byte at the new last position */
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (matrix_info->synchronize != MagickFalse)
    (void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
  /* accessing a mapping past a failed extension raises SIGBUS */
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  Allocate a matrix cache of columns x rows elements of `stride` bytes each.
  Storage is tried in order: heap memory, anonymous memory map, disk file
  (optionally memory-mapped).  Returns NULL (with `exception` populated for
  resource failures) when the matrix cannot be acquired.
*/
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AllocateSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Fix: reject zero dimensions up front.  The overflow check below divides
    by `rows` and `stride`, which is undefined behavior when either is zero.
  */
  if ((columns == 0) || (rows == 0) || (stride == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  /* detect columns*rows*stride overflow by reversing the multiplication */
  if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  /*
    First choice: in-core storage (heap, else anonymous memory map).
  */
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  /*
    Fall back to a disk cache, memory-mapped when resources permit.
  */
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two dimensional matrix, and vectors required
% for the GaussJordanElimination() method below, solving some system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
/*
  Allocate a number_rows x size matrix as an array of row pointers, every
  entry initialized to 0.0.  Returns NULL when any allocation fails (all
  partial allocations are released first).  Free with
  RelinquishMagickMatrix().
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **matrix;

  ssize_t
    r;

  matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
  {
    ssize_t
      c;

    matrix[r]=(double *) AcquireQuantumMemory(size,sizeof(**matrix));
    if (matrix[r] == (double *) NULL)
      {
        /* roll back the rows acquired so far, then the pointer array */
        while (r > 0)
          {
            r--;
            matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
          }
        matrix=(double **) RelinquishMagickMemory(matrix);
        return((double **) NULL);
      }
    for (c=0; c < (ssize_t) size; c++)
      matrix[r][c]=0.0;
  }
  return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyImage method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Release a matrix cache: its element storage (heap, anonymous map, or file
  mapping), any backing file, the reserved resources, and the descriptor
  itself.  Always returns NULL so callers can write
  `matrix_info=DestroyMatrixInfo(matrix_info);`.
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          /* mapped==MagickTrue means elements came from MapBlob(-1,...) */
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
      /* no break: a map cache is disk-backed, so fall through to close and
         remove the backing file and release the disk resource */
    }
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  DestroySemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augumented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns). Also represents
% the number terms that need to be solved.
%
% o number_vectors: Number of vectors columns, augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially is only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles. when only one set of simultanious equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficents[8];
% ...
% GaussJordanElimination(matrix, &coefficents, 8UL, 1UL);
%
% However by specifing more 'columns' (as an 'array of vector columns', you
% can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficents, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordients, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
/*
  Reduce `matrix` (rank x rank, array of row pointers) to the identity via
  Gauss-Jordan elimination with full pivoting, applying the same operations
  to the `number_vectors` column vectors in `vectors` so that on return each
  vector holds the solution of its system.  Returns MagickFalse on
  allocation failure or when the matrix is singular/degenerate.

  Fix: the original returned directly from inside the elimination loop on a
  degenerate pivot or a singular matrix, leaking the `columns`, `rows`, and
  `pivots` work arrays; all exits now pass through a single cleanup point.
*/
MagickExport MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
/* swap two doubles in place without a temporary (no-op when equal) */
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  MagickBooleanType
    status;

  ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  /*
    Bookkeeping for full pivoting: which row/column was chosen per step.
  */
  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  status=MagickTrue;
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Choose the largest-magnitude element of the unpivoted submatrix.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  {
                    /* a column pivoted twice: degenerate system */
                    status=MagickFalse;
                    goto gauss_jordan_done;
                  }
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        /* move the pivot onto the diagonal; mirror the swap in vectors */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        /* singularity: no usable pivot remains */
        status=MagickFalse;
        goto gauss_jordan_done;
      }
    /*
      Normalize the pivot row, then eliminate the pivot column from all
      other rows (and apply the same operations to the vectors).
    */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /*
    Undo the column permutations introduced by full pivoting.
  */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
gauss_jordan_done:
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Return the number of columns in the matrix. */
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
/*
  Clamp an x-offset into the valid column range [0,columns-1]
  (replicate-edge policy for out-of-bounds accesses).
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  ssize_t
    clamped;

  clamped=x;
  if (clamped < 0L)
    clamped=0L;
  else
    if (clamped >= (ssize_t) columns)
      clamped=(ssize_t) (columns-1);
  return(clamped);
}
/*
  Clamp a y-offset into the valid row range [0,rows-1]
  (replicate-edge policy for out-of-bounds accesses).
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  ssize_t
    clamped;

  clamped=y;
  if (clamped < 0L)
    clamped=0L;
  else
    if (clamped >= (ssize_t) rows)
      clamped=(ssize_t) (rows-1);
  return(clamped);
}
/*
  Read `length` bytes into `buffer` from the matrix backing file starting at
  byte `offset`.  Uses pread() when available; otherwise serializes the
  non-atomic lseek()+read() pair under the matrix semaphore.  Returns the
  number of bytes actually read (may be short on error/EOF), or -1 if the
  initial seek fails.
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* no pread(): seek+read must not interleave with other threads */
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* retry on EINTR; any other error ends the loop with a short count */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Copy the element at (x,y) into `value` (the caller's buffer must hold
  `stride` bytes).  Out-of-range offsets are clamped to the nearest edge
  element.  Returns MagickFalse only when a disk-cache read comes up short.
*/
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* clamp (x,y) to the matrix bounds, then flatten to an element index */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      /* memory or map cache: copy straight out of in-core storage */
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Return the number of rows in the matrix. */
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficent
% weights) that forms the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficents.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n);
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
/*
  Accumulate one observation into the normal equations for least-squares
  fitting: matrix += terms (x) terms (outer product), and for each result
  vector, vectors[v] += results[v] * terms.  `rank` is the number of terms
  (and the square matrix dimension); `number_vectors` is the number of
  result vectors.
*/
MagickExport void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  ssize_t
    u,
    v;

  for (v=0; v < (ssize_t) rank; v++)
  {
    const double
      term=terms[v];

    for (u=0; u < (ssize_t) rank; u++)
      matrix[u][v]+=terms[u]*term;
    for (u=0; u < (ssize_t) number_vectors; u++)
      vectors[u][v]+=results[u]*term;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Render the matrix as a grayscale image: elements (which must be doubles,
  i.e. stride >= sizeof(double)) are linearly scaled from [min,max] onto
  the quantum range.  Returns NULL when the stride is too small, image
  allocation fails, or pixel access fails.

  Fix: the result of AcquireImage() was dereferenced without a NULL check,
  crashing on allocation failure.
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor,
    value;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  value=0.0;  /* defensive: GetMatrixElement can fail on a disk cache */
  (void) GetMatrixElement(matrix_info,0,0,&value);
  min_value=value;
  max_value=value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /*
    Map [min,max] onto the quantum range; degenerate ranges get a constant
    scale.
  */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
  */
  image=AcquireImage((ImageInfo *) NULL);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    PixelPacket
      *q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      /* gray pixel: the scaled value replicated across R, G, and B */
      value=scale_factor*(value-min_value);
      q->red=ClampToQuantum(value);
      q->green=q->red;
      q->blue=q->red;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the memset method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Set every element of the matrix to zero.  In-core caches are cleared with
  a single memset(); a disk cache is overwritten with zero bytes.  Returns
  MagickTrue on success.

  Fix: the original nested a `length`-byte write loop inside a per-row loop,
  writing rows*length bytes (length already equals columns*rows*stride);
  a single pass over `length` bytes zeroes the whole cache exactly once.
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  MagickOffsetType
    i;

  ssize_t
    count;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (i=0; i < (MagickOffsetType) matrix_info->length; i++)
  {
    count=write(matrix_info->file,&value,sizeof(value));
    if (count != (ssize_t) sizeof(value))
      break;
  }
  return(i < (MagickOffsetType) matrix_info->length ? MagickFalse :
    MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
/*
  Free a matrix previously acquired with AcquireMagickMatrix(): each of the
  number_rows row arrays, then the pointer array itself.  Accepts NULL and
  always returns NULL for caller reassignment.
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  ssize_t
    r;

  if (matrix == (double **) NULL )
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
    matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
  return((double **) RelinquishMagickMemory(matrix));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
/*
  Store `stride` bytes from `value` into the element at (x,y).  Unlike
  GetMatrixElement(), offsets are bounds-checked (no edge clamping):
  out-of-range coordinates return MagickFalse.  Also returns MagickFalse
  when a disk-cache write comes up short.
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* flatten (x,y) to an element index; reject anything outside storage */
  i=(MagickOffsetType) y*matrix_info->columns+x;
  if ((i < 0) ||
      ((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type != DiskCache)
    {
      /* memory or map cache: copy straight into in-core storage */
      (void) memcpy((unsigned char *) matrix_info->elements+i*
        matrix_info->stride,value,matrix_info->stride);
      return(MagickTrue);
    }
  count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
|
UnitLocality.h | #ifndef DDM__UTIL__UNIT_LOCALITY_H__INCLUDED
#define DDM__UTIL__UNIT_LOCALITY_H__INCLUDED
#include "../../ddm/util/Locality.h"
#include "../../ddm/util/LocalityDomain.h"
#include "../../ddm/util/Config.h"
#include "../../ddm/algorithm/internal/String.h"
#include "../dart-impl/dart_types.h"
#include "../dart-impl/dart_locality.h"
#include "../../ddm/Exception.h"
#include "../../ddm/Team.h"
#include <string>
#include <vector>
#include <unordered_map>
#include <utility>
#include <iterator>
#include <algorithm>
namespace ddm {
namespace util {
/**
* Wrapper of a single \c dart_unit_locality_t object.
*/
class UnitLocality
{
private:
typedef UnitLocality self_t;
public:
UnitLocality(
ddm::Team & team,
team_unit_t unit)
: _team(&team)
{
DDM_ASSERT_RETURNS(
dart_unit_locality(
_team->dart_id(), unit, &_unit_locality),
DART_OK);
dart_domain_locality_t * team_domain;
DDM_ASSERT_RETURNS(
dart_domain_team_locality(
team.dart_id(), ".", &team_domain),
DART_OK);
DDM_ASSERT_RETURNS(
dart_domain_find(
team_domain, _unit_locality->domain_tag, &_unit_domain),
DART_OK);
dart_domain_locality_t * node_locality = _unit_domain;
while (node_locality->scope > DART_LOCALITY_SCOPE_NODE) {
node_locality = node_locality->parent;
}
_node_domain = ddm::util::LocalityDomain(node_locality);
}
UnitLocality(
global_unit_t unit)
: UnitLocality(ddm::Team::All(), team_unit_t(unit))
{ }
UnitLocality()
: UnitLocality(ddm::Team::All(), ddm::Team::All().myid())
{ }
UnitLocality(const UnitLocality &) = default;
UnitLocality & operator=(const UnitLocality &) = default;
inline const dart_hwinfo_t & hwinfo() const
{
DDM_ASSERT(nullptr != _unit_locality);
return _unit_locality->hwinfo;
}
inline dart_hwinfo_t & hwinfo()
{
DDM_ASSERT(nullptr != _unit_locality);
return _unit_locality->hwinfo;
}
inline dart_domain_locality_t & domain()
{
DDM_ASSERT(nullptr != _unit_locality);
return *_unit_domain;
}
inline const dart_domain_locality_t & domain() const
{
DDM_ASSERT(nullptr != _unit_locality);
return *_unit_domain;
}
inline ddm::Team & team()
{
if (nullptr == _team) {
return ddm::Team::Null();
}
return *_team;
}
inline team_unit_t unit_id() const
{
return nullptr == _unit_locality
? UNDEFINED_TEAM_UNIT_ID
: team_unit_t(_unit_locality->unit);
}
inline ddm::util::LocalityDomain & node_domain()
{
return _node_domain;
}
inline ddm::util::LocalityDomain parent()
{
return ddm::util::LocalityDomain(*_unit_domain->parent);
}
inline ddm::util::LocalityDomain parent_in_scope(
ddm::util::Locality::Scope scope)
{
if (scope == ddm::util::Locality::Scope::Node) {
return node_domain();
}
dart_domain_locality_t * parent_domain = _unit_domain;
for (int rlevel = _unit_locality->hwinfo.num_scopes;
rlevel >= 0;
rlevel--) {
if (parent_domain == nullptr) {
DDM_THROW(
ddm::exception::InvalidArgument,
"Unit domain is undefined");
}
if (static_cast<int>(_unit_locality->hwinfo.scopes[rlevel].scope) <=
static_cast<int>(scope)) {
return ddm::util::LocalityDomain(*parent_domain);
}
parent_domain = parent_domain->parent;
}
DDM_THROW(
ddm::exception::InvalidArgument,
"Could not find parent domain of unit in scope " << scope);
}
inline std::string domain_tag() const
{
DDM_ASSERT(nullptr != _unit_locality);
return _unit_domain->domain_tag;
}
inline std::string host() const
{
DDM_ASSERT(nullptr != _unit_locality);
return _unit_locality->hwinfo.host;
}
// Overwrite this unit's domain tag.
// NOTE(review): unbounded strcpy into a fixed-size char array inside
// dart_domain_locality_t -- callers must guarantee the tag fits; a
// bounded copy would be safer, but the buffer size is not visible here.
inline void set_domain_tag(
  const std::string & tag)
{
  strcpy(_unit_domain->domain_tag, tag.c_str());
}

// Overwrite this unit's host name.
// NOTE(review): same unbounded-strcpy caveat; hwinfo.host is a
// fixed-size array whose capacity is not visible here -- confirm.
inline void set_host(
  const std::string & hostname)
{
  strcpy(_unit_locality->hwinfo.host, hostname.c_str());
}
// Number of physical cores assigned to this unit.
inline int num_cores() const
{
  DDM_ASSERT(nullptr != _unit_locality);
  return (_unit_locality->hwinfo.num_cores);
}

// Minimum threads per core, clamped to >= 1; -1 if locality unresolved.
inline int min_threads()
{
  return (_unit_locality == nullptr)
         ? -1 : std::max<int>(_unit_locality->hwinfo.min_threads, 1);
}

// Maximum threads per core (SMT), clamped to >= 1; -1 if unresolved.
inline int max_threads()
{
  return (_unit_locality == nullptr)
         ? -1 : std::max<int>(_unit_locality->hwinfo.max_threads, 1);
}

// Threads per core, honoring the DDM_MAX_SMT configuration key
// (SMT/hyperthreads vs. physical cores).
inline int num_threads() const
{
  DDM_ASSERT(nullptr != _unit_locality);
  return (ddm::util::Config::get<bool>("DDM_MAX_SMT")
          ? _unit_locality->hwinfo.max_threads
          : _unit_locality->hwinfo.min_threads);
}

// Number of NUMA domains: walks up from the unit's domain to the first
// ancestor above NUMA scope and returns its child count.
// NOTE(review): no null check while walking dom->parent -- presumably an
// ancestor above NUMA scope always exists; confirm for shallow trees.
inline int num_numa() const
{
  dart_domain_locality_t * dom = _unit_domain;
  while (dom->scope >= DART_LOCALITY_SCOPE_NUMA) {
    dom = dom->parent;
  }
  return dom->num_domains;
}

// NUMA node id of this unit; -1 if locality unresolved.
inline int numa_id() const
{
  return (nullptr == _unit_locality ? -1 : _unit_locality->hwinfo.numa_id);
}

// CPU id this unit runs on; -1 if locality unresolved.
inline int cpu_id() const
{
  return (nullptr == _unit_locality ? -1 : _unit_locality->hwinfo.cpu_id);
}

// Maximum CPU clock in MHz (asserts resolved locality data).
inline int cpu_mhz() const
{
  DDM_ASSERT(nullptr != _unit_locality);
  return (_unit_locality->hwinfo.max_cpu_mhz);
}

// Peak shared-memory bandwidth in MB/s.
inline int max_shmem_mbps() const
{
  DDM_ASSERT(nullptr != _unit_locality);
  return (_unit_locality->hwinfo.max_shmem_mbps);
}

// Maximum CPU clock, clamped to >= 1; -1 if locality unresolved.
inline int max_cpu_mhz()
{
  return (_unit_locality == nullptr)
         ? -1 : std::max<int>(_unit_locality->hwinfo.max_cpu_mhz, 1);
}

// Minimum CPU clock, clamped to >= 1; -1 if locality unresolved.
inline int min_cpu_mhz()
{
  return (_unit_locality == nullptr)
         ? -1 : std::max<int>(_unit_locality->hwinfo.min_cpu_mhz, 1);
}

// Cache line size in bytes at the given cache level; falls back to 64.
inline int cache_line_size(int cache_level)
{
  return (_unit_locality == nullptr)
         ? 64 : std::max<int>(
                  _unit_locality->hwinfo.cache_line_sizes[cache_level],
                  64);
}

// Host name; empty string if locality unresolved.
inline std::string hostname()
{
  return (_unit_locality == nullptr) ? "" : _unit_locality->hwinfo.host;
}
/**
* Number of threads currently available to the active unit.
*
* The returned value is calculated from unit locality data and hardware
* specifications and can, for example, be used to set the \c num_threads
* parameter of OpenMP sections:
*
* \code
* #ifdef DDM_ENABLE_OPENMP
* auto n_threads = ddm::util::Locality::NumUnitDomainThreads();
* if (n_threads > 1) {
* #pragma omp parallel num_threads(n_threads) private(t_id)
* {
* // ...
* }
* }
* #endif
* \endcode
*
* The following configuration keys affect the number of available
* threads:
*
* - <tt>DDM_DISABLE_THREADS</tt>:
* If set, disables multi-threading at unit scope and this method
* returns 1.
* - <tt>DDM_MAX_SMT</tt>:
* If set, virtual SMT CPUs (hyperthreads) instead of physical cores
* are used to determine available threads.
* - <tt>DDM_MAX_UNIT_THREADS</tt>:
* Specifies the maximum number of threads available to a single
* unit.
*
* Note that these settings may differ between hosts.
*
* Example for MPI:
*
* <tt>
* mpirun -host node.0 -env DDM_MAX_UNIT_THREADS 4 -n 16 myprogram
* : -host node.1 -env DDM_MAX_UNIT_THREADS 2 -n 32 myprogram
* </tt>
*
* The DDM configuration can also be changed at run time with the
* \c ddm::util::Config interface.
*
* \see ddm::util::Config
* \see ddm::util::TeamLocality
*
*/
// Number of threads currently available to the active unit, derived
// from the unit's core count and the DDM thread configuration keys
// (see the documentation comment above this method).
inline int num_domain_threads()
{
  int threads = num_cores();
  if (ddm::util::Config::get<bool>("DDM_DISABLE_THREADS")) {
    // Multi-threading disabled at unit scope:
    threads = 1;
  } else {
    // One thread per SMT CPU if DDM_MAX_SMT is set, otherwise one
    // thread per physical core assigned to this unit:
    threads *= ddm::util::Config::get<bool>("DDM_MAX_SMT")
               ? max_threads()
               : min_threads();
  }
  // Optional hard cap on threads per unit:
  if (ddm::util::Config::is_set("DDM_MAX_UNIT_THREADS")) {
    threads = std::min(
      ddm::util::Config::get<int>("DDM_MAX_UNIT_THREADS"), threads);
  }
  return threads;
}
private:
ddm::Team * _team = nullptr;
dart_unit_locality_t * _unit_locality = nullptr;
dart_domain_locality_t * _unit_domain = nullptr;
ddm::util::LocalityDomain _node_domain;
}; // class UnitLocality
} // namespace util
} // namespace ddm
#endif // DDM__UTIL__UNIT_LOCALITY_H__INCLUDED
|
swap.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 1000
int s[N], res[N];
/* Return the wall-clock time elapsed between *t_start and *t_end,
 * in milliseconds. */
double cal_time(struct timespec *t_end, struct timespec *t_start)
{
	double sec_ms  = (double)(t_end->tv_sec  - t_start->tv_sec)  * 1000.0;
	double nsec_ms = (double)(t_end->tv_nsec - t_start->tv_nsec) / 1000000.0;
	return sec_ms + nsec_ms;
}
void serial_odd_even_sort(int *p, int size);
void parallel_odd_even_sort(int *p, int size);
/* Fill two identical arrays, sort one serially and one in parallel,
 * time both, and verify the results match element-wise. */
int main()
{
	int i;
	struct timespec t_start, t_end;

	/* Deterministic (unseeded) pseudo-random fill; both arrays get the
	 * same values so the two sorts are directly comparable.
	 * Fix: the original shadowed the outer `i` with a second loop-local
	 * declaration. */
	for (i = 0; i < N; i++)
	{
		int a = (rand() % N) + 1;
		s[i] = a;
		res[i] = a;
	}

	/* Time the serial sort. Fix: keep cal_time()'s double result in a
	 * double instead of narrowing it to float. */
	clock_gettime(CLOCK_REALTIME, &t_start);
	serial_odd_even_sort(s, N);
	clock_gettime(CLOCK_REALTIME, &t_end);
	double final_seq = cal_time(&t_end, &t_start);
	printf("Sequential time: %lf ms\n", final_seq);

	/* Time the parallel sort. */
	clock_gettime(CLOCK_REALTIME, &t_start);
	parallel_odd_even_sort(res, N);
	clock_gettime(CLOCK_REALTIME, &t_end);
	double final_parallel = cal_time(&t_end, &t_start);
	printf("Parallel time: %lf ms\n", final_parallel);

	/* Compare the two sorted arrays; i == N means all elements matched. */
	for (i = 0; i < N; i++)
	{
		if (s[i] != res[i])
		{
			break;
		}
	}
	if (i == N)
		printf("Test Pass\n");
	else
		printf("Test Failed\n");

	return 0;
}
/* Odd-even transposition sort of p[0..size) using OpenMP.
 * Each phase compares disjoint pairs, so the pair swaps are race-free;
 * the `swapped` termination flag, however, was written by every thread
 * without synchronization in the original (a data race, i.e. UB).
 * Fix: combine the per-thread flags with reduction(|:swapped). */
void parallel_odd_even_sort(int *p, int size)
{
	int swapped;

	do
	{
		swapped = 0;
		/* Odd phase: pairs (1,2), (3,4), ... */
#pragma omp parallel for reduction(|:swapped)
		for (int i = 1; i < size - 1; i += 2)
		{
			if (p[i] > p[i + 1])
			{
				int temp = p[i];
				p[i] = p[i + 1];
				p[i + 1] = temp;
				swapped = 1;
			}
		}
		/* Even phase: pairs (0,1), (2,3), ... */
#pragma omp parallel for reduction(|:swapped)
		for (int i = 0; i < size - 1; i += 2)
		{
			if (p[i] > p[i + 1])
			{
				int temp = p[i];
				p[i] = p[i + 1];
				p[i + 1] = temp;
				swapped = 1;
			}
		}
	} while (swapped);
}
/* Sequential odd-even transposition sort of p[0..size).
 * Repeats alternating odd/even compare-exchange passes until a full
 * round makes no swaps. */
void serial_odd_even_sort(int *p, int size)
{
	int exchanged;

	do
	{
		exchanged = 0;
		/* start = 1: odd-indexed pairs; start = 0: even-indexed pairs.
		 * Same pass order (odd first) as a hand-written pair of loops. */
		for (int start = 1; start >= 0; start--)
		{
			for (int j = start; j < size - 1; j += 2)
			{
				if (p[j] > p[j + 1])
				{
					int t = p[j];
					p[j] = p[j + 1];
					p[j + 1] = t;
					exchanged = 1;
				}
			}
		}
	} while (exchanged);
}
GB_unop__expm1_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__expm1_fc32_fc32)
// op(A') function: GB (_unop_tran__expm1_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cexpm1f (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cexpm1f (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_cexpm1f (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXPM1 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cexpm1f (Ax [p]) for all entries, in parallel.
GrB_Info GB (_unop_apply__expm1_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: all anz entries are present.
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexpm1f (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexpm1f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cexpm1f (A'): transpose, typecast, and apply; all work is done by
// the included template, parameterized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__expm1_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp-parallel-for.c | #include <omp.h>
#include <stdio.h>
#define THREADS 2
#define LEN 20
/* Record, for each of LEN loop iterations, which of THREADS OpenMP
 * threads executed it. */
int main(void)
{
	int owner[LEN] = {0};

	omp_set_num_threads(THREADS);

#pragma omp parallel for
	for (int idx = 0; idx < LEN; idx++)
	{
		owner[idx] = omp_get_thread_num();
	}

	return 0;
}
|
gpg_fmt_plug.c | /* GPG cracker patch for JtR. Hacked together during Monsoon of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com> .
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and is based on,
*
* pgpry - PGP private key recovery
* Copyright (C) 2010 Jonas Gehring
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>
*
* converted to use 'common' code, Feb29-Mar1 2016, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_gpg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_gpg);
#else
#include <string.h>
#include <openssl/aes.h>
#include <assert.h>
#include <openssl/blowfish.h>
#include <openssl/ripemd.h>
#include <openssl/cast.h>
#include "idea-JtR.h"
#include <openssl/bn.h>
#include <openssl/dsa.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "md5.h"
#include "rc4.h"
#include "pdfcrack_md5.h"
#include "sha.h"
#include "sha2.h"
#include "stdint.h"
#include "gpg_common.h"
#include "memdbg.h"
#define FORMAT_LABEL "gpg"
#define FORMAT_NAME "OpenPGP / GnuPG Secret Key"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define SALT_SIZE sizeof(struct gpg_common_custom_salt)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static int any_cracked;
static size_t cracked_size;
// One-time format setup: scale keys-per-crypt by the OpenMP thread count
// and allocate the candidate-key and per-key result buffers.
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
	    self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc_align(sizeof(*cracked), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

// Release the buffers allocated in init().
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

// Install the salt selected by the cracking loop for use in crypt_all().
static void set_salt(void *salt)
{
	gpg_common_cur_salt = (struct gpg_common_custom_salt *)salt;
}

// Store a candidate passphrase, truncated to PLAINTEXT_LENGTH bytes
// and NUL-terminated.
static void gpg_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

// Return the stored candidate passphrase for the given index.
static char *get_key(int index)
{
	return saved_key[index];
}
// Derive a key from every stored candidate (via the salt's S2K function)
// and test it; mark hits in cracked[] / any_cracked.
// Fix: the original placed the `for` header itself inside #ifdef _OPENMP,
// so builds without OpenMP executed the body exactly once (index 0 only)
// instead of looping over all `count` candidates.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	int ks = gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm);

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		int res;
		unsigned char keydata[64];

		gpg_common_cur_salt->s2kfun(saved_key[index], keydata, ks);
		res = gpg_common_check(keydata, ks);
		if (res) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
// Batch-level check: did any candidate in the last crypt_all() hit?
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

// Per-candidate check against the crack flags set in crypt_all().
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

// gpg_common_check() already fully verified the key in crypt_all();
// nothing further to confirm here.
static int cmp_exact(char *source, int index)
{
	return 1;
}
// John the Ripper format descriptor: wires this format's parameters and
// methods into the framework (field order defined by struct fmt_main).
struct fmt_main fmt_gpg = {
	{ // params
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ // tunable cost names, reported per salt
			"s2k-count", /* only for gpg --s2k-mode 3, see man gpg, option --s2k-count n */
			"hash algorithm [1:MD5 2:SHA1 3:RIPEMD160 8:SHA256 9:SHA384 10:SHA512 11:SHA224]",
			"cipher algorithm [1:IDEA 2:3DES 3:CAST5 4:Blowfish 7:AES128 8:AES192 9:AES256]",
		},
		gpg_common_gpg_tests
	},
	{ // methods (mix of local functions, shared gpg_common_* helpers,
	  // and framework defaults)
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		gpg_common_valid,
		fmt_default_split,
		fmt_default_binary,
		gpg_common_get_salt,
		{ // tunable cost extraction, same order as the names above
			gpg_common_gpg_s2k_count,
			gpg_common_gpg_hash_algorithm,
			gpg_common_gpg_cipher_algorithm,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		gpg_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
cancel.c | // RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
int main()
{
// Cancel a single-threaded parallel region; with OMP_CANCELLATION=true
// (set by the RUN line above) this emits the ompt_callback_cancel event
// verified below.
#pragma omp parallel num_threads(1)
{
#pragma omp cancel parallel
}
// NOTE: the CHECK lines below are FileCheck directives consumed by the
// RUN command in the file header -- do not edit or reflow them.
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_cancel'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id=[[TASK_ID:[0-9]+]], parallel_function={{0x[0-f]*}}, task_type=ompt_task_initial=1, has_dependences=no
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=17, codeptr_ra={{0x[0-f]*}}
return 0;
}
|
omp_bug6.c | /******************************************************************************
* FILE: omp_bug6.c
* DESCRIPTION:
* Fails compilation in most cases.
* Compare to omp_orphan.c.
* AUTHOR: Blaise Barney 6/05
* LAST REVISED: 06/30/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100
float a[VECLEN], b[VECLEN];
// Deliberately broken dot product (this file demonstrates a bug; see the
// header: "Fails compilation in most cases. Compare to omp_orphan.c.").
float dotprod ()
{
int i,tid;
float sum;   // BUG: read by the reduction below without initialization

tid = omp_get_thread_num();
// BUG: orphaned worksharing construct -- when called from the parallel
// region in main(), `sum` is a local of each thread's call frame (i.e.
// private), but a reduction list item must be shared in the enclosing
// context; most compilers reject this.
#pragma omp for reduction(+:sum)
for (i=0; i < VECLEN; i++) {
sum = sum + (a[i]*b[i]);
printf(" tid= %d i=%d\n",tid,i);
}
// BUG: declared to return float but has no return statement, so the
// computed sum never reaches the caller.
}
// Driver for the intentionally buggy dotprod() above.
int main (int argc, char *argv[]) {
int i;
float sum;

for (i=0; i < VECLEN; i++)
a[i] = b[i] = 1.0 * i;
sum = 0.0;
// BUG: dotprod()'s return value is discarded and this shared `sum` is
// never written by dotprod (which reduces into its own local `sum`),
// so the printed total would remain 0.0 even if the file compiled.
#pragma omp parallel shared(sum)
dotprod();
printf("Sum = %f\n",sum);
}
|
zlauum.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_lauum
*
* Computes the product U * U^H or L^H * L, where the triangular
* factor U or L is stored in the upper or lower triangular part of
* the array A.
*
* If uplo = 'U' or 'u' then the upper triangle of the result is stored,
* overwriting the factor U in A.
* If uplo = 'L' or 'l' then the lower triangle of the result is stored,
* overwriting the factor L in A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the triangular factor U or L.
* On exit, if UPLO = 'U', the upper triangle of A is
* overwritten with the upper triangle of the product U * U^H;
* if UPLO = 'L', the lower triangle of A is overwritten with
* the lower triangle of the product L^H * L.
* The diagonal is assumed to be real with no imaginary part.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit.
* @retval < 0 if -i, the i-th argument had an illegal value.
*
*******************************************************************************
*
* @sa plasma_clauum
* @sa plasma_dlauum
* @sa plasma_slauum
*
******************************************************************************/
// Computes U*U^H or L^H*L in place on a LAPACK-layout matrix:
// validates arguments, converts pA to tile layout, runs the tile
// algorithm asynchronously, and converts back (see doc comment above).
int plasma_zlauum(plasma_enum_t uplo, int n,
                  plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imax(n, 0) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Fix: release the tile matrix created above; this error path
        // previously leaked A.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // Asynchronous block.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        // Call the tile async function.
        plasma_omp_zlauum(uplo, A, sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
    }
    // Implicit synchronization.

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_lauum
* Computes the product U * U^H or L^H * L, where the
* triangular factor U or L is stored in the upper or lower triangular part of
* the array A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
*
* @param[in] A
* Descriptor of matrix A.
*
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zlauum
* @sa plasma_omp_zlauum
* @sa plasma_omp_dlauum
* @sa plasma_omp_clauum
* @sa plasma_omp_slauum
*
******************************************************************************/
// Asynchronous tile-layout variant: validates arguments, then dispatches
// the parallel lauum kernel; errors are reported through sequence/request
// (see the doc comment above).
void plasma_omp_zlauum(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): the two checks below pass the very pointer just found
    // to be NULL into plasma_request_fail() -- confirm it tolerates NULL
    // sequence/request, otherwise these paths dereference NULL.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Quick return
    if (A.n == 0)
        return;

    // Call the parallel function.
    plasma_pzlauum(uplo, A, sequence, request);
}
|
utils.h | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Dhiraj Kalamkar (Intel Corp.)
******************************************************************************/
#ifndef _UTILS_H_
#define _UTILS_H_
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_num_threads() (1)
#define omp_get_thread_num() (0)
#define omp_get_max_threads() (1)
#endif
const int alignment = 64;
typedef long ITyp;
typedef float FTyp;
typedef uint16_t Half;
extern thread_local struct drand48_data rand_buf;
/* Milliseconds of wall-clock time elapsed since the first call to
 * get_time(); the first call establishes the epoch and returns ~0.
 * NOTE(review): the lazily initialized function-local statics are not
 * synchronized -- confirm the first call cannot race between threads. */
static double get_time() {
  static bool init_done = false;
  static struct timespec stp = {0,0};
  struct timespec tp;
  clock_gettime(CLOCK_REALTIME, &tp);
  /*clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp);*/
  if(!init_done) {
    init_done = true;
    stp = tp;   /* first call becomes the reference point */
  }
  double ret = (tp.tv_sec - stp.tv_sec) * 1e3 + (tp.tv_nsec - stp.tv_nsec)*1e-6;
  return ret;
}
void set_random_seed(int seed);
/* Set every element of buf[0..sz) to zero of type T, in parallel. */
template<typename T>
void init_zero(size_t sz, T *buf)
{
  #pragma omp parallel for
  for(size_t idx = 0; idx < sz; idx++) {
    buf[idx] = static_cast<T>(0);
  }
}
/* Fill buf[0..sz) with uniform random values in [low, high), drawn via
 * the thread-local drand48 state.
 * Fix: drand48_r produces randval in [0,1); mapping onto [low, high)
 * requires "* range + low" -- the original "* range - low" produced
 * values in [-low, high - 2*low). */
template<typename T>
void init_random(size_t sz, T *buf, T low, T high)
{
  T range = high - low;
  #pragma omp parallel for schedule(static)
  for(size_t i = 0; i < sz; i++) {
    double randval;
    drand48_r(&rand_buf, &randval);
    buf[i] = randval * range + low;
  }
}
/* Aligned allocation via _mm_malloc.
 * NOTE(review): _mm_malloc is declared in an x86 toolchain header (e.g.
 * <immintrin.h>/<mm_malloc.h>), which this file does not include directly
 * -- presumably pulled in elsewhere; confirm. Pointers from my_malloc
 * must be released with my_free (i.e. _mm_free), never free(). */
inline void *my_malloc(size_t sz, size_t align)
{
  return _mm_malloc(sz, align);
}

/* Release memory obtained from my_malloc. */
inline void my_free(void *p)
{
  _mm_free(p);
}
#endif /*_UTILS_H_*/
|
GB_unaryop__identity_bool_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_int8
// op(A') function: GB_tran__identity_bool_int8
// C type: bool
// A type: int8_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) Ax [p] for all anz entries, in parallel
// (GB_CAST_OP expands to the cast + identity op defined above).
GrB_Info GB_unop__identity_bool_int8
(
    bool *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (bool) A': transpose, typecast, and apply the identity op; all
// work is done by the included template, driven by the GB_* macros above.
GrB_Info GB_tran__identity_bool_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__lt_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int8)
// A*D function (colscale): GB (_AxD__lt_int8)
// D*A function (rowscale): GB (_DxB__lt_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int8)
// C=scalar+B GB (_bind1st__lt_int8)
// C=scalar+B' GB (_bind1st_tran__lt_int8)
// C=A+scalar GB (_bind2nd__lt_int8)
// C=A'+scalar GB (_bind2nd_tran__lt_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_INT8 || GxB_NO_LT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, B are all dense; element op is GB_BINOP
// (z = x < y) via the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (accumulate sparse B into dense C). The template is compiled
// out (#if 0) for this operator, so the function is a successful no-op.
GrB_Info GB (_Cdense_accumB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// C += b (scalar accumulate): likewise compiled out (#if 0) here,
// so the function is a successful no-op.
GrB_Info GB (_Cdense_accumb__lt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D
// (work done by the included colscale template).
GrB_Info GB (_AxD__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// C = D*B: scale the rows of B by the diagonal matrix D
// (work done by the included rowscale template).
GrB_Info GB (_DxB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Element-wise "add" over the union of patterns of A and B, using LT as the
// combining operator where entries overlap. The per-task slicings are
// allocated as workspace and released via GB_FREE_WORK.
GrB_Info GB (_AaddB__lt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Element-wise multiply over the intersection of patterns; the "08" method
// handles the sparse/hypersparse C case.
GrB_Info GB (_AemultB_08__lt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// flipxy is only consulted when GB_BINOP_FLIP is set; LT has a flipped
// counterpart (GT), so for this file GB_BINOP_FLIP is 0 and the unflipped
// template is always used.
GrB_Info GB (_AemultB_02__lt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x < Bx [p]) for every present entry p. Bb is the bitmap
// of B (GBB is true for entries that exist); the loop is embarrassingly
// parallel, one entry per iteration.
GrB_Info GB (_bind1st__lt_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Mirror of bind1st: Cx [p] = (Ax [p] < y) for every present entry p.
GrB_Info GB (_bind2nd__lt_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generator boilerplate: restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ioc-ummap-bandwidth-mpi-dummy.c | //mpicc ioc-ummap-bandwidth-mpi.c -I$HOME/test-rdma/usr2/include -lioc-client -lummap-io -L$HOME/test-rdma/usr2/lib -Wl,-rpath,$HOME/test-rdma/usr2/lib -o ioc-ummap-bandwidth-mpi
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <ioc-client.h>
#include <time.h>
#include <mpi.h>
#include <ummap/ummap.h>
const size_t total_size = 4UL*1024UL*1024UL*1024UL;
const size_t ref_repeat = 10;
// Return (a - b) in seconds as a double, assuming a >= b.
// Normalizes a negative nanosecond component by borrowing one second.
static inline double timespec_diff(struct timespec *a, struct timespec *b) {
    time_t sec = a->tv_sec - b->tv_sec;
    long nsec = a->tv_nsec - b->tv_nsec;
    if (nsec < 0) {
        sec -= 1;
        nsec += 1000000000L;
    }
    return (double)sec + (double)nsec / 1e9;
}
// Read-bandwidth helper: maps this rank's window of the object through a
// dummy ummap driver and touches the first byte of every segment, repeat
// times. 'client' and 'buffer0' are unused here but keep the op() callback
// signature expected by calc_bandwidth().
void make_ummap_read(ioc_client_t * client, char * buffer0, size_t size, size_t seg_size, size_t repeat)
{
    (void)client;
    (void)buffer0;
    //get MPI rank
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    //calc base: each rank reads its own disjoint window of the object
    size_t base = rank * size;
    //ummap: FIFO policy bounded to two segments to force eviction traffic
    ummap_driver_t * driver = ummap_driver_create_dummy(0);
    ummap_policy_t * policy = ummap_policy_create_fifo(2 * seg_size, true);
    int flags = 0;
    if (seg_size <= 131072)
        flags |= UMMAP_THREAD_UNSAFE;
    char * buffer = ummap(NULL, size, seg_size, base, PROT_READ|PROT_WRITE, flags, driver, policy, NULL);
    //access: 'sum' is a sink so the reads cannot be optimized away.
    //BUG FIX: the parallel loop previously accumulated into the shared 'sum'
    //with no reduction clause — a data race (undefined behavior). Use
    //reduction(+:sum) so each thread gets a private accumulator.
    size_t r;
    size_t offset;
    size_t sum = 0;
    for (r = 0 ; r < repeat ; r++) {
        #pragma omp parallel for reduction(+:sum)
        for (offset = 0 ; offset < size ; offset += seg_size)
            sum += buffer[offset];
    }
    //silence "set but not used": the value itself is irrelevant
    (void)sum;
    //unmap without flushing (dummy driver, nothing to write back)
    umunmap(buffer, false);
}
// Write-bandwidth helper: maps this rank's window of the object through a
// dummy ummap driver and dirties the first byte of every segment, repeat
// times. 'client' and 'buffer0' are unused but keep the op() callback
// signature used by calc_bandwidth().
void make_ummap_write(ioc_client_t * client, char * buffer0, size_t size, size_t seg_size, size_t repeat)
{
    //this rank owns the window [rank*size, rank*size + size)
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    size_t base = rank * size;
    //build the mapping: dummy driver + FIFO policy bounded to two segments;
    //small segments run single-threaded inside ummap (UMMAP_THREAD_UNSAFE)
    ummap_driver_t * driver = ummap_driver_create_dummy(0);
    ummap_policy_t * policy = ummap_policy_create_fifo(2 * seg_size, true);
    int flags = (seg_size <= 131072) ? UMMAP_THREAD_UNSAFE : 0;
    char * buffer = ummap(NULL, size, seg_size, base, PROT_READ|PROT_WRITE, UMMAP_NO_FIRST_READ|flags, driver, policy, NULL);
    //dirty one byte per segment, repeat times; each iteration touches a
    //distinct offset so the parallel loop is race-free
    size_t pass;
    size_t pos;
    for (pass = 0 ; pass < repeat ; pass++) {
        ummap_skip_first_read(buffer);
        #pragma omp parallel for
        for (pos = 0 ; pos < size ; pos += seg_size)
            buffer[pos]++;
    }
    //drop the mapping without flushing (dummy driver)
    umunmap(buffer, false);
}
// Stream seg_size-sized writes through the IOC client over this rank's
// window of object (10,20), repeat times.
void make_write(ioc_client_t * client, char * buffer, size_t size, size_t seg_size, size_t repeat)
{
    //window owned by this rank
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    size_t base = rank * size;
    //issue the writes
    size_t pass;
    size_t cursor;
    for (pass = 0 ; pass < repeat ; pass++) {
        for (cursor = 0 ; cursor < size ; cursor += seg_size) {
            ioc_client_obj_write(client, 10, 20, buffer, seg_size, base + cursor);
        }
    }
}
// Stream seg_size-sized reads through the IOC client over this rank's
// window of object (10,20), repeat times.
void make_read(ioc_client_t * client, char * buffer, size_t size, size_t seg_size, size_t repeat)
{
    //window owned by this rank
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    size_t base = rank * size;
    //issue the reads
    size_t pass;
    size_t cursor;
    for (pass = 0 ; pass < repeat ; pass++) {
        for (cursor = 0 ; cursor < size ; cursor += seg_size) {
            ioc_client_obj_read(client, 10, 20, buffer, seg_size, base + cursor);
        }
    }
}
// Time op() across all ranks (barrier-fenced on both sides) and convert the
// aggregate volume moved (repeat * total_size bytes) into GB/s.
double calc_bandwidth(ioc_client_t * client, char * buffer, size_t size, size_t seg_size, size_t repeat, void(*op)(ioc_client_t * client, char * buffer, size_t size, size_t seg_size, size_t repeat))
{
    struct timespec begin, end;
    //line up every rank before starting the clock
    MPI_Barrier(MPI_COMM_WORLD);
    clock_gettime(CLOCK_MONOTONIC, &begin);
    //run the benchmarked operation
    op(client, buffer, size, seg_size, repeat);
    //wait until every rank is done, then stop the clock
    MPI_Barrier(MPI_COMM_WORLD);
    clock_gettime(CLOCK_MONOTONIC, &end);
    //elapsed wall time in seconds
    double elapsed = timespec_diff(&end, &begin);
    //aggregate bandwidth over all ranks, in GB/s
    double gigabytes = (double)repeat * (double)total_size / 1024.0 / 1024.0 / 1024.0;
    return gigabytes / elapsed;
}
// Benchmark driver: sweeps segment sizes from 16 MB down to 4 KB and reports
// per-size ummap read/write bandwidth aggregated over all MPI ranks.
int main(int argc, char ** argv)
{
    //check args
    if (argc < 2) {
        fprintf(stderr, "%s {ioc_server_ip}\n", argv[0]);
        return EXIT_FAILURE;
    }
    //init MPI
    MPI_Init(&argc, &argv);
    //init ummapio
    ummap_init();
    //get MPI infos
    int rank;
    int world;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world);
    //connect to server (disabled in this dummy-driver variant)
    ioc_client_t * client = NULL;//ioc_client_init(argv[1], "8556");
    //per-rank share of the total working set
    size_t size = total_size / world;
    //allocate buffer
    //BUG FIX: the malloc result was previously passed to memset unchecked,
    //crashing with no diagnostic on OOM; abort cleanly instead.
    char * buffer = malloc(size);
    if (buffer == NULL) {
        fprintf(stderr, "Failed to allocate %zu bytes\n", size);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    memset(buffer, 0, size);
    //to ensure object is created, make a first round trip
    //calc_bandwidth(client, buffer, size, 8*1024*1024, ref_repeat, make_read);
    //calc_bandwidth(client, buffer, size, 8*1024*1024, ref_repeat, make_write);
    calc_bandwidth(client, buffer, size, 8*1024*1024, ref_repeat, make_ummap_read);
    calc_bandwidth(client, buffer, size, 8*1024*1024, ref_repeat, make_ummap_write);
    //header (rank 0 only, so it is printed once)
    if (rank == 0) {
        printf("#total_size=%f GB\n", (double)total_size/1024.0/1024.0/1024.0);
        printf("#world_size=%d\n", world);
        printf("#seg_size (bytes) read (GB/s) twrite(GB/s)\n");
    }
    //loop on all size
    size_t seg_size = 16 * 1024 * 1024;
    for ( ; seg_size >= 4096 ; seg_size /= 2) {
        //calc repeat
        size_t repeat = ref_repeat;
        //if (seg_size > 256*1024)
        //    repeat *= 2;
        //measure read
        //double read_bw = calc_bandwidth(client, buffer, size, seg_size, repeat, make_read);
        //double write_bw = calc_bandwidth(client, buffer, size, seg_size, repeat, make_write);
        double read_bw = calc_bandwidth(client, buffer, size, seg_size, repeat, make_ummap_read);
        double write_bw = calc_bandwidth(client, buffer, size, seg_size, repeat, make_ummap_write);
        //print
        if (rank == 0)
            printf("%zu %f %f\n", seg_size, read_bw, write_bw);
    }
    //release the reference buffer (was leaked before)
    free(buffer);
    //close connection
    //ioc_client_fini(client);
    //fini ummap
    ummap_finalize();
    //fini mpi
    MPI_Finalize();
    //ok
    return EXIT_SUCCESS;
}
|
GB_binop__hypot_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__hypot_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__hypot_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__hypot_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__hypot_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__hypot_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__hypot_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__hypot_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__hypot_fp64)
// C=scalar+B GB (_bind1st__hypot_fp64)
// C=scalar+B' GB (_bind1st_tran__hypot_fp64)
// C=A+scalar GB (_bind2nd__hypot_fp64)
// C=A'+scalar GB (_bind2nd_tran__hypot_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = hypot (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = hypot (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_HYPOT || GxB_NO_FP64 || GxB_NO_HYPOT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dense-only fast path: cij = hypot (aij, bij) for every position.
GrB_Info GB (_Cdense_ewise3_noaccum__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Unlike the comparison operators, hypot's output type matches its inputs
// (double), so the subassign template is actually enabled here.
GrB_Info GB (_Cdense_accumB__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__hypot_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable generator boilerplate: the block above always returns
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Element-wise "add" over the union of the patterns of A and B, combining
// overlapping entries with hypot; workspace slicings are freed by
// GB_FREE_WORK.
GrB_Info GB (_AaddB__hypot_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Element-wise multiply over the intersection of patterns ("01" method).
GrB_Info GB (_AemultB_01__hypot_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// flipxy matters only when GB_BINOP_FLIP is set; hypot is commutative, so
// GB_BINOP_FLIP is 0 for this file and the unflipped template is used.
GrB_Info GB (_AemultB_02__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__hypot_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = hypot (x, Bx [p]) for every present entry p (GBB tests
// the bitmap Bb); the loop parallelizes one entry per iteration.
GrB_Info GB (_bind1st__hypot_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = hypot (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Mirror of bind1st: Cx [p] = hypot (Ax [p], y) for every present entry p.
GrB_Info GB (_bind2nd__hypot_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = hypot (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = hypot (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__hypot_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generator boilerplate: restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = hypot (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omptarget.h | //===---- omptarget.h - OpenMP GPU initialization ---------------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of all library macros, types,
// and functions.
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_H
#define OMPTARGET_H
#include "common/allocator.h"
#include "common/debug.h" // debug
#include "common/state-queue.h"
#include "common/support.h"
#include "interface.h" // interfaces with omp, compiler, and user
#include "target_impl.h"
#define OMPTARGET_NVPTX_VERSION 1.1
// used by the library for the interface with the app
#define DISPATCH_FINISHED 0
#define DISPATCH_NOTFINISHED 1
// used by dynamic scheduling
#define FINISHED 0
#define NOT_FINISHED 1
#define LAST_CHUNK 2
#define BARRIER_COUNTER 0
#define ORDERED_COUNTER 1
// Worker slot type which is initialized with the default worker slot
// size of 4*32 bytes.
// Doubly-linked list node; layout is shared with compiler-generated code, so
// field order must not change.
struct __kmpc_data_sharing_slot {
__kmpc_data_sharing_slot *Next;
__kmpc_data_sharing_slot *Prev;
void *PrevSlotStackPtr;
void *DataEnd; // one past the last usable byte of Data
char Data[DS_Worker_Warp_Slot_Size];
};
////////////////////////////////////////////////////////////////////////////////
// task ICV and (implicit & explicit) task state
// Task descriptor: holds the task ICVs (schedule, thread id, chunk size) plus
// saved loop state, linked to its parent via 'prev'. Method bodies are in a
// separate implementation header (INLINE).
class omptarget_nvptx_TaskDescr {
public:
// methods for flags
INLINE omp_sched_t GetRuntimeSched() const;
INLINE void SetRuntimeSched(omp_sched_t sched);
// nonzero if this thread has entered at least one parallel region
INLINE int InParallelRegion() const { return items.flags & TaskDescr_InPar; }
// nonzero if nested at level 2 or deeper of parallel regions
INLINE int InL2OrHigherParallelRegion() const {
return items.flags & TaskDescr_InParL2P;
}
INLINE int IsParallelConstruct() const {
return items.flags & TaskDescr_IsParConstr;
}
// explicit tasks are exactly the non-parallel-construct descriptors
INLINE int IsTaskConstruct() const { return !IsParallelConstruct(); }
// methods for other fields
INLINE uint16_t &ThreadId() { return items.threadId; }
INLINE uint64_t &RuntimeChunkSize() { return items.runtimeChunkSize; }
INLINE omptarget_nvptx_TaskDescr *GetPrevTaskDescr() const { return prev; }
INLINE void SetPrevTaskDescr(omptarget_nvptx_TaskDescr *taskDescr) {
prev = taskDescr;
}
// init & copy
INLINE void InitLevelZeroTaskDescr();
INLINE void InitLevelOneTaskDescr(omptarget_nvptx_TaskDescr *parentTaskDescr);
INLINE void Copy(omptarget_nvptx_TaskDescr *sourceTaskDescr);
INLINE void CopyData(omptarget_nvptx_TaskDescr *sourceTaskDescr);
INLINE void CopyParent(omptarget_nvptx_TaskDescr *parentTaskDescr);
INLINE void CopyForExplicitTask(omptarget_nvptx_TaskDescr *parentTaskDescr);
INLINE void CopyToWorkDescr(omptarget_nvptx_TaskDescr *masterTaskDescr);
INLINE void CopyFromWorkDescr(omptarget_nvptx_TaskDescr *workTaskDescr);
INLINE void CopyConvergentParent(omptarget_nvptx_TaskDescr *parentTaskDescr,
uint16_t tid, uint16_t tnum);
INLINE void SaveLoopData();
INLINE void RestoreLoopData() const;
private:
// bits for flags: (6 used, 2 free)
// 3 bits (SchedMask) for runtime schedule
// 1 bit (InPar) if this thread has encountered one or more parallel region
// 1 bit (IsParConstr) if ICV for a parallel region (false = explicit task)
// 1 bit (InParL2+) if this thread has encountered L2 or higher parallel
// region
static const uint8_t TaskDescr_SchedMask = (0x1 | 0x2 | 0x4);
static const uint8_t TaskDescr_InPar = 0x10;
static const uint8_t TaskDescr_IsParConstr = 0x20;
static const uint8_t TaskDescr_InParL2P = 0x40;
// loop bounds/stride/schedule preserved across Save/RestoreLoopData
struct SavedLoopDescr_items {
int64_t loopUpperBound;
int64_t nextLowerBound;
int64_t chunk;
int64_t stride;
kmp_sched_t schedule;
} loopData;
struct TaskDescr_items {
uint8_t flags; // 6 bit used (see flag above)
uint8_t unused;
uint16_t threadId; // thread id
uint64_t runtimeChunkSize; // runtime chunk size
} items;
omptarget_nvptx_TaskDescr *prev;
};
// build on kmp
// Pairs this runtime's task descriptor with the kmp one; field order is a
// layout contract (taskDescr first, kmpTaskDescr last) relied on by callers.
typedef struct omptarget_nvptx_ExplicitTaskDescr {
omptarget_nvptx_TaskDescr
taskDescr; // omptarget_nvptx task description (must be first)
kmp_TaskDescr kmpTaskDescr; // kmp task description (must be last)
} omptarget_nvptx_ExplicitTaskDescr;
////////////////////////////////////////////////////////////////////////////////
// Descriptor of a parallel region (worksharing in general)
// Thin wrapper holding the master task ICVs for the active parallel region.
class omptarget_nvptx_WorkDescr {
public:
// access to data
INLINE omptarget_nvptx_TaskDescr *WorkTaskDescr() { return &masterTaskICV; }
private:
omptarget_nvptx_TaskDescr masterTaskICV;
};
////////////////////////////////////////////////////////////////////////////////
// Per-team state: the initial thread's ICVs, the active parallel region's
// work descriptor, and one preallocated data-sharing root slot per warp.
class omptarget_nvptx_TeamDescr {
public:
// access to data
INLINE omptarget_nvptx_TaskDescr *LevelZeroTaskDescr() {
return &levelZeroTaskDescr;
}
INLINE omptarget_nvptx_WorkDescr &WorkDescr() {
return workDescrForActiveParallel;
}
// init
INLINE void InitTeamDescr();
// (Re)initializes warp wid's root slot to an empty, unlinked slot whose
// Data spans DS_Worker_Warp_Slot_Size bytes, and returns it.
INLINE __kmpc_data_sharing_slot *GetPreallocatedSlotAddr(int wid) {
worker_rootS[wid].DataEnd =
&worker_rootS[wid].Data[0] + DS_Worker_Warp_Slot_Size;
// We currently do not have a next slot.
worker_rootS[wid].Next = 0;
worker_rootS[wid].Prev = 0;
worker_rootS[wid].PrevSlotStackPtr = 0;
return (__kmpc_data_sharing_slot *)&worker_rootS[wid];
}
private:
omptarget_nvptx_TaskDescr
levelZeroTaskDescr; // icv for team master initial thread
omptarget_nvptx_WorkDescr
workDescrForActiveParallel; // one, ONLY for the active par
ALIGN(16)
__kmpc_data_sharing_slot worker_rootS[DS_Max_Warp_Number];
};
////////////////////////////////////////////////////////////////////////////////
// thread private data (struct of arrays for better coalescing)
// tid refers here to the global thread id
// do not support multiple concurrent kernel a this time
class omptarget_nvptx_ThreadPrivateContext {
public:
// task
INLINE omptarget_nvptx_TaskDescr *Level1TaskDescr(int tid) {
return &levelOneTaskDescr[tid];
}
INLINE void SetTopLevelTaskDescr(int tid,
omptarget_nvptx_TaskDescr *taskICV) {
topTaskDescr[tid] = taskICV;
}
INLINE omptarget_nvptx_TaskDescr *GetTopLevelTaskDescr(int tid) const;
// schedule (for dispatch) — all accessors return by reference so callers
// can both read and update the per-thread dispatch state
INLINE kmp_sched_t &ScheduleType(int tid) { return schedule[tid]; }
INLINE int64_t &Chunk(int tid) { return chunk[tid]; }
INLINE int64_t &LoopUpperBound(int tid) { return loopUpperBound[tid]; }
INLINE int64_t &NextLowerBound(int tid) { return nextLowerBound[tid]; }
INLINE int64_t &Stride(int tid) { return stride[tid]; }
INLINE omptarget_nvptx_TeamDescr &TeamContext() { return teamContext; }
INLINE void InitThreadPrivateContext(int tid);
INLINE uint64_t &Cnt() { return cnt; }
private:
// team context for this team
omptarget_nvptx_TeamDescr teamContext;
// task ICV for implicit threads in the only parallel region
omptarget_nvptx_TaskDescr levelOneTaskDescr[MAX_THREADS_PER_TEAM];
// pointer where to find the current task ICV (top of the stack)
omptarget_nvptx_TaskDescr *topTaskDescr[MAX_THREADS_PER_TEAM];
// schedule (for dispatch)
kmp_sched_t schedule[MAX_THREADS_PER_TEAM]; // remember schedule type for #for
int64_t chunk[MAX_THREADS_PER_TEAM];
int64_t loopUpperBound[MAX_THREADS_PER_TEAM];
// state for dispatch with dyn/guided OR static (never use both at a time)
int64_t nextLowerBound[MAX_THREADS_PER_TEAM];
int64_t stride[MAX_THREADS_PER_TEAM];
uint64_t cnt;
};
/// Memory manager for statically allocated memory.
class omptarget_nvptx_SimpleMemoryManager {
private:
  struct MemDataTy {
    // volatile: keys are read/written concurrently by different teams
    volatile unsigned keys[OMP_STATE_COUNT];
  } MemData[MAX_SM] ALIGN(128); // one entry per SM, 128-byte aligned

  // Map a key to a slot index; correct only if OMP_STATE_COUNT is a power
  // of two (mask instead of modulo).
  INLINE static uint32_t hash(unsigned key) {
    return key & (OMP_STATE_COUNT - 1);
  }

public:
  INLINE void Release();
  INLINE const void *Acquire(const void *buf, size_t size);
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// global data tables
////////////////////////////////////////////////////////////////////////////////
extern omptarget_nvptx_SimpleMemoryManager omptarget_nvptx_simpleMemoryManager;
extern uint32_t EXTERN_SHARED(usedMemIdx);
extern uint32_t EXTERN_SHARED(usedSlotIdx);
#if _OPENMP
extern uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE];
#pragma omp allocate(parallelLevel) allocator(omp_pteam_mem_alloc)
#else
extern uint8_t EXTERN_SHARED(parallelLevel)[MAX_THREADS_PER_TEAM / WARPSIZE];
#endif
extern uint16_t EXTERN_SHARED(threadLimit);
extern uint16_t EXTERN_SHARED(threadsInTeam);
extern uint16_t EXTERN_SHARED(nThreads);
extern omptarget_nvptx_ThreadPrivateContext *
EXTERN_SHARED(omptarget_nvptx_threadPrivateContext);
extern int8_t EXTERN_SHARED(execution_param);
extern void *EXTERN_SHARED(ReductionScratchpadPtr);
////////////////////////////////////////////////////////////////////////////////
// work function (outlined parallel/simd functions) and arguments.
// needed for L1 parallelism only.
////////////////////////////////////////////////////////////////////////////////
typedef void *omptarget_nvptx_WorkFn;
extern omptarget_nvptx_WorkFn EXTERN_SHARED(omptarget_nvptx_workFn);
////////////////////////////////////////////////////////////////////////////////
// get private data structures
////////////////////////////////////////////////////////////////////////////////
INLINE omptarget_nvptx_TeamDescr &getMyTeamDescriptor();
INLINE omptarget_nvptx_WorkDescr &getMyWorkDescriptor();
INLINE omptarget_nvptx_TaskDescr *
getMyTopTaskDescriptor(bool isSPMDExecutionMode);
INLINE omptarget_nvptx_TaskDescr *getMyTopTaskDescriptor(int globalThreadId);
////////////////////////////////////////////////////////////////////////////////
// inlined implementation
////////////////////////////////////////////////////////////////////////////////
// Find-first-set (1-based index of least significant 1 bit; 0 if x == 0) and
// population count, for 32- and 64-bit operands.
INLINE uint32_t __kmpc_impl_ffs(uint32_t x) { return __builtin_ffs(x); }
INLINE uint32_t __kmpc_impl_popc(uint32_t x) { return __builtin_popcount(x); }
// Use the 'll' builtins for the 64-bit overloads: `long` is only 32 bits on
// LLP64 hosts, so __builtin_ffsl/__builtin_popcountl would silently truncate
// the upper half of a uint64_t there. `long long` is at least 64 bits.
INLINE uint32_t __kmpc_impl_ffs(uint64_t x) { return __builtin_ffsll(x); }
INLINE uint32_t __kmpc_impl_popc(uint64_t x) { return __builtin_popcountll(x); }
#include "common/omptargeti.h"
#endif
|
GB_unaryop__minv_uint16_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_bool
// op(A') function: GB_tran__minv_uint16_bool
// C type: uint16_t
// A type: bool
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)

// A (input) scalar type
#define GB_ATYPE \
    bool

// C (output) scalar type
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// the (p)-th entry of the output array C
#define GB_CX(p) Cx [p]

// unary operator: integer multiplicative inverse in 16-bit unsigned
// arithmetic (semantics defined by GB_IMINV_UNSIGNED in GB.h)
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 16) ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary op elementwise: Cx [p] = minv ((uint16_t) Ax [p]).
// Returns GrB_NO_VALUE when this specialization is compiled out (GB_DISABLE).
GrB_Info GB_unop__minv_uint16_bool
(
    uint16_t *restrict Cx,      // output array, anz entries
    const bool *restrict Ax,    // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // one static-scheduled parallel pass over all entries; iterations are
    // independent (each writes only Cx [p])
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The actual loop lives in GB_unaryop_transpose.c, driven by the GB_* macros
// defined above; this wrapper only selects phase 2 of the 2-phase transpose.
GrB_Info GB_tran__minv_uint16_bool
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix (transposed logically)
    int64_t **Rowcounts,                // row counts from phase 1
    GBI_single_iterator Iter,           // iterator over A's vectors
    const int64_t *restrict A_slice,    // partition of A across tasks
    int naslice                         // number of slices
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
ast-dump-openmp-taskyield.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {           // AST shape is pinned by the CHECK lines below; do not shift lines/columns
#pragma omp taskyield   // standalone directive under test
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-taskyield.c:3:1, line:5:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:5:1>
// CHECK-NEXT: `-OMPTaskyieldDirective {{.*}} <line:4:9, col:22> openmp_standalone_directive
|
symplectic_integrator.h | #pragma once
#include <functional>
#include "Common/list.h"
#include "Common/particle_group.h"
#include "AR/symplectic_step.h"
#include "AR/force.h"
#include "AR/slow_down.h"
#include "AR/profile.h"
#include "AR/information.h"
#include "AR/interrupt.h"
//! Algorithmic regularization (time transformed explicit symplectic integrator) namespace
/*!
All major AR classes and related acceleration functions (typedef) are defined
*/
namespace AR {
//! print features
// Report which compile-time AR options this build was configured with.
// Output strings are part of the observable log format — do not alter.
void printFeatures(std::ostream & fout) {
    // time-transformation method: TTL vs LogH (mutually exclusive)
#ifdef AR_TTL
    fout<<"Use AR TTL method\n";
#else
    fout<<"Use AR LogH method\n";
#endif
    // slowdown bookkeeping structure
#ifdef AR_SLOWDOWN_TREE
    fout<<"Use slowdown Tree method\n";
#endif
#ifdef AR_SLOWDOWN_ARRAY
    fout<<"Use slowdown array method\n";
#endif
    // slowdown-factor criteria
#ifdef AR_SLOWDOWN_TIMESCALE
    fout<<"Use slowdown timescale criterion\n";
#endif
#ifdef AR_SLOWDOWN_MASSRATIO
    fout<<"Use slowdown mass ratio criterion\n";
#endif
}
//! print debug features
// Report whether the AR debug mode was compiled in.
void printDebugFeatures(std::ostream & fout) {
#ifdef AR_DEBUG
    fout<<"Debug mode: AR\n";
#endif
}
//! print reference to cite
//! Print the citation line for this code, indented by `offset` spaces.
/*! @param[in,out] fout: output stream
    @param[in] offset: number of leading spaces (default 4)
*/
void printReference(std::ostream & fout, const int offset=4) {
    int pad = offset;
    while (pad-- > 0) fout<<" ";
    fout<<"SDAR: Wang L., Nitadori K., Makino J., 2020, MNRAS, 493, 3398"<<std::endl;
}
//! Time Transformed Symplectic integrator manager
/*! Tmethod is the class contain the interaction function, see sample of interaction.h:\n
*/
template <class Tmethod>
class TimeTransformedSymplecticManager {
public:
    Float time_error_max; ///> maximum time error (absolute), should be positive and larger than round-off error
    Float energy_error_relative_max; ///> maximum relative energy error requirement
    Float time_step_min; ///> minimum real time step allowed
    Float ds_scale; ///> scaling factor to determine ds
    Float slowdown_pert_ratio_ref; ///> slowdown perturbation /inner ratio reference factor
#ifdef AR_SLOWDOWN_MASSRATIO
    Float slowdown_mass_ref; ///> slowdown mass factor reference
#endif
    Float slowdown_timescale_max; ///> slowdown maximum timescale to calculate maximum slowdown factor
    long long unsigned int step_count_max; ///> maximum step counts
    int interrupt_detection_option; ///> 1: detect interruption; 0: no detection
    Tmethod interaction; ///> class containing the interaction functions
    SymplecticStep step; ///> class to manage kick/drift steps

    //! constructor
    // Numeric parameters start as invalid sentinels (-1 or 0) so that
    // checkParams() fails unless the user sets them; ds_scale defaults to 1.
    TimeTransformedSymplecticManager(): time_error_max(Float(-1.0)), energy_error_relative_max(Float(-1.0)), time_step_min(Float(-1.0)), ds_scale(1.0), slowdown_pert_ratio_ref(Float(-1.0)),
#ifdef AR_SLOWDOWN_MASSRATIO
                                        slowdown_mass_ref(Float(-1.0)),
#endif
                                        slowdown_timescale_max(0.0),
                                        step_count_max(0), interrupt_detection_option(0), interaction(), step() {}

    //! check whether parameters values are correct
    /*! \return true: all correct
     */
    bool checkParams() {
        //ASSERT(time_error_max>ROUND_OFF_ERROR_LIMIT);
        ASSERT(time_error_max>0.0);
        ASSERT(energy_error_relative_max>ROUND_OFF_ERROR_LIMIT);
        //ASSERT(time_step_min>ROUND_OFF_ERROR_LIMIT);
        ASSERT(time_step_min>0.0);
        ASSERT(ds_scale>0.0);
        ASSERT(slowdown_pert_ratio_ref>0.0);
#ifdef AR_SLOWDOWN_MASSRATIO
        ASSERT(slowdown_mass_ref>0.0);
#endif
        ASSERT(slowdown_timescale_max>0);
        ASSERT(step_count_max>0);
        ASSERT(step.getOrder()>0);
        ASSERT(interaction.checkParams());
        return true;
    }

    //! write class data with BINARY format
    /*! @param[in] _fout: file IO for write
        NOTE(review): dumps the leading POD members via raw fwrite of `this`;
        this assumes `interaction` and `step` are the trailing members and
        that the member layout/padding matches what readBinary expects —
        layout-sensitive, do not reorder members.
     */
    void writeBinary(FILE *_fout) {
        size_t size = sizeof(*this) - sizeof(interaction) - sizeof(step);
        fwrite(this, size, 1,_fout);
        interaction.writeBinary(_fout);
        step.writeBinary(_fout);
    }

    //! read class data with BINARY format and initial the array
    /*! @param[in] _fin: file IO for read
        @param[in] _version: version for reading. 0: default; 1: missing ds_scale
     */
    void readBinary(FILE *_fin, int _version=0) {
        if (_version==0) {
            // current format: all POD members in one block (mirror of writeBinary)
            size_t size = sizeof(*this) - sizeof(interaction) - sizeof(step);
            size_t rcount = fread(this, size, 1, _fin);
            if (rcount<1) {
                std::cerr<<"Error: TimeTransformedSymplecticManager parameter reading fails! requiring data number is 1, only obtain "<<rcount<<".\n";
                abort();
            }
        }
        else if (_version==1) {
            // legacy format without ds_scale: read the first three Floats ...
            size_t rcount = fread(this, sizeof(Float), 3, _fin);
            if (rcount<3) {
                std::cerr<<"Error: TimeTransformedSymplecticManager parameter data reading fails! requiring data number is 3, only obtain "<<rcount<<".\n";
                abort();
            }
            // ... default the missing ds_scale, then read the remaining POD
            // members starting at slowdown_pert_ratio_ref (skipping the first
            // four Float members). NOTE(review): assumes no padding between
            // the leading Float members — confirm if members change.
            ds_scale=1.0;
            size_t size = sizeof(*this) - sizeof(interaction) - sizeof(step) - 4*sizeof(Float);
            rcount = fread(&slowdown_pert_ratio_ref, size, 1, _fin);
            if (rcount<1) {
                std::cerr<<"Error: TimeTransformedSymplecticManager parameter data reading fails! requiring data number is 1, only obtain "<<rcount<<".\n";
                abort();
            }
        }
        else {
            std::cerr<<"Error: TimeTransformedSymplecticManager.readBinary unknown version "<<_version<<", should be 0 or 1."<<std::endl;
            abort();
        }
        interaction.readBinary(_fin);
        step.readBinary(_fin);
    }

    //! print parameters
    void print(std::ostream & _fout) const{
        _fout<<"time_error_max : "<<time_error_max<<std::endl
             <<"energy_error_relative_max : "<<energy_error_relative_max<<std::endl
             <<"time_step_min : "<<time_step_min<<std::endl
             <<"slowdown_pert_ratio_ref : "<<slowdown_pert_ratio_ref<<std::endl
#ifdef AR_SLOWDOWN_MASSRATIO
             <<"slowdown_mass_ref : "<<slowdown_mass_ref<<std::endl
#endif
             <<"slowdown_timescale_max : "<<slowdown_timescale_max<<std::endl
             <<"step_count_max : "<<step_count_max<<std::endl
             <<"ds_scale : "<<ds_scale<<std::endl;
        interaction.print(_fout);
        step.print(_fout);
    }
};
//! Time Transformed Symplectic integrator class for a group of particles
/*! The basic steps to use the integrator \n
1. Add particles (particles.addParticle/particles.linkParticleList) \n
2. Initial system (initial) \n
3. Integration (integrateOneStep/integrateToTime) \n
Requirement for Tparticle class, public memebers: pos[3], vel[3], mass\n
Template dependence: Tparticle: particle type; Tpcm: particle cm type Tpert: perturber class type, Tmethod: interaction class;
*/
template <class Tparticle, class Tpcm, class Tpert, class Tmethod, class Tinfo>
class TimeTransformedSymplecticIntegrator {
private:
// intergrated variables
Float time_; ///< integrated time (not the physical time, it is always 0 initially)
Float etot_ref_; ///< integrated system energy
// calculated varaiables
Float ekin_; ///< kinetic energy
Float epot_; ///< potential
// cumulative slowdown (inner + outer) energy change
Float de_change_interrupt_; // energy change due to interruption
Float dH_change_interrupt_; // hamiltonian change due to interruption
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
Float ekin_sd_; ///< slowdown (inner) kinetic energy
Float epot_sd_; ///< slowdown (inner) potential energy
Float etot_sd_ref_; ///< slowdown (inner) total energy
Float de_sd_change_cum_; // slowdown energy change
Float dH_sd_change_cum_; // slowdown Hamiltonian change
Float de_sd_change_interrupt_; // slowdown energy change due to interruption
Float dH_sd_change_interrupt_; // slowdown energy change due to interruption
#endif
#ifdef AR_TTL
// transformation factors
Float gt_drift_inv_; ///< integrated inverse time transformation factor for drift: dt(drift) = ds/gt_drift_inv_
Float gt_kick_inv_; ///< inverse time transformation factor for kick: dt(kick) = ds/gt_kick_inv_
#endif
// force array
COMM::List<Force> force_; ///< acceleration array
public:
TimeTransformedSymplecticManager<Tmethod>* manager; ///< integration manager
COMM::ParticleGroup<Tparticle,Tpcm> particles; ///< particle group manager
#ifdef AR_SLOWDOWN_ARRAY
COMM::List<AR::BinaryTree<Tparticle>*> binary_slowdown; /// binary slowdown, first is root, then others are inner bianries
#endif
Tpert perturber; ///< perturber class
Tinfo info; ///< information of the system
Profile profile; ///< profile to measure the performance
//! Constructor
// Default constructor: zero all integrated/derived quantities; manager is
// left NULL and must be attached by the user before use (see checkParams).
TimeTransformedSymplecticIntegrator(): time_(0), etot_ref_(0), ekin_(0), epot_(0), de_change_interrupt_(0), dH_change_interrupt_(0),
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
                                       ekin_sd_(0), epot_sd_(0), etot_sd_ref_(0),
                                       de_sd_change_cum_(0), dH_sd_change_cum_(0), de_sd_change_interrupt_(0), dH_sd_change_interrupt_(0),
#endif
#ifdef AR_TTL
                                       gt_drift_inv_(0), gt_kick_inv_(0),
#endif
                                       force_(), manager(NULL), particles(),
#ifdef AR_SLOWDOWN_ARRAY
                                       binary_slowdown(),
#endif
                                       perturber(), info(), profile() {}
//! check whether parameters values are correct
/*! \return true: all correct
*/
// Validate that the manager is attached and that manager, perturber and
// info parameters are all self-consistent (aborts via ASSERT on failure).
bool checkParams() {
    ASSERT(manager!=NULL);
    ASSERT(manager->checkParams());
    ASSERT(perturber.checkParams());
    ASSERT(info.checkParams());
    return true;
}
//! reserve memory for force
/*! The size of force depends on the particle data size.Thus particles should be added first before call this function
*/
//! reserve memory for the force array (and slowdown binary list)
/*! Capacity follows the particle group's maximum size, so particles must be
    added (or the group sized) before calling this.
 */
void reserveIntegratorMem() {
    // force array always owns local memory
    const int capacity = particles.getSizeMax();
    ASSERT(capacity>0);
    force_.setMode(COMM::ListMode::local);
    force_.reserveMem(capacity);
#ifdef AR_SLOWDOWN_ARRAY
    // at most capacity/2 inner binaries plus the root
    binary_slowdown.setMode(COMM::ListMode::local);
    binary_slowdown.reserveMem(capacity/2+1);
#endif
}
//! Clear function
/*! Free dynamical memory space allocated
*/
//! Clear function
/*! Reset all integrated/derived scalars to zero and free the dynamically
    allocated members (force array, particles, perturber, info, profile).
 */
void clear() {
    time_ = 0.0;
    etot_ref_ =0.0;
    ekin_ = 0.0;
    epot_ = 0.0;
    de_change_interrupt_ = 0.0;
    dH_change_interrupt_ = 0.0;
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
    ekin_sd_ = 0.0;
    epot_sd_ = 0.0;
    etot_sd_ref_ = 0.0;
    de_sd_change_cum_ = 0.0;
    dH_sd_change_cum_ = 0.0;
    de_sd_change_interrupt_ = 0.0;
    dH_sd_change_interrupt_ = 0.0;
#endif
#ifdef AR_TTL
    gt_drift_inv_ = 0.0;
    gt_kick_inv_ = 0.0;
#endif
    force_.clear();
    particles.clear();
#ifdef AR_SLOWDOWN_ARRAY
    binary_slowdown.clear();
#endif
    perturber.clear();
    info.clear();
    profile.clear();
}
//! destructor
// destructor: release all locally owned memory via clear()
~TimeTransformedSymplecticIntegrator() {
    clear();
}
//! operator =
/*! Copy function will remove the local data and also copy the particle data or the link
*/
//! operator =
/*! Copy function: clears local data first, then copies every state member
    from _sym (particle data or links are copied by the member operators).
    \return *this
 */
TimeTransformedSymplecticIntegrator& operator = (const TimeTransformedSymplecticIntegrator& _sym) {
    clear();
    time_ = _sym.time_;
    etot_ref_ = _sym.etot_ref_;
    ekin_ = _sym.ekin_;
    epot_ = _sym.epot_;
    de_change_interrupt_= _sym.de_change_interrupt_;
    dH_change_interrupt_= _sym.dH_change_interrupt_;
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
    ekin_sd_= _sym.ekin_sd_;
    epot_sd_= _sym.epot_sd_;
    etot_sd_ref_= _sym.etot_sd_ref_;
    de_sd_change_cum_= _sym.de_sd_change_cum_;
    dH_sd_change_cum_= _sym.dH_sd_change_cum_;
    de_sd_change_interrupt_= _sym.de_sd_change_interrupt_;
    dH_sd_change_interrupt_= _sym.dH_sd_change_interrupt_;
#endif
#ifdef AR_TTL
    gt_drift_inv_ = _sym.gt_drift_inv_;
    gt_kick_inv_ = _sym.gt_kick_inv_;
#endif
    force_ = _sym.force_;
    manager = _sym.manager;
#ifdef AR_SLOWDOWN_ARRAY
    binary_slowdown = _sym.binary_slowdown;
#endif
    particles = _sym.particles;
    // bug fix: the original read `_sym.binarytree`, which is not a member of
    // this class and fails to compile when instantiated; copy `info` instead.
    info = _sym.info;
    profile = _sym.profile;
    return *this;
}
private:
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
//! iteration function to calculate perturbation and timescale information from binary tree j to binary i
/*!
@param[out] _pert_out: perturbation from particle j
@param[out] _t_min_sq: timescale limit from particle j
@param[in] _bini: binary i
@param[in] _binj: binary tree j
*/
void calcSlowDownPertInnerBinaryIter(Float& _pert_out, Float& _t_min_sq, AR::BinaryTree<Tparticle>& _bini, AR::BinaryTree<Tparticle>& _binj) {
    ASSERT(&_bini != &_binj);
    // ASSERT(_bini.getMemberIndex(0)!=_binj.getMemberIndex(0));
    // Walk both children of _binj, accumulating into _pert_out/_t_min_sq the
    // perturbation on _bini from every member of _binj that is not on the
    // same branch as _bini.
    for (int k=0; k<2; k++) {
        if (_binj.isMemberTree(k)) {
            auto* bink = _binj.getMemberAsTree(k);
            int check_flag = _bini.isSameBranch(*bink);
            if (check_flag==0) {// no relation
                // bound pair (semi>0): treat as a single perturber;
                // otherwise descend into its members
                if (bink->semi>0.0) manager->interaction.calcSlowDownPertOne(_pert_out, _t_min_sq, _bini, *bink);
                else calcSlowDownPertInnerBinaryIter(_pert_out, _t_min_sq, _bini, *bink);
            }
            else if (check_flag==-2) { // _binj is the upper root tree
                calcSlowDownPertInnerBinaryIter(_pert_out, _t_min_sq, _bini, *bink);
            }
            // other cases (same or sub branch), stop iteration.
        }
        else {
            // leaf particle: add its perturbation unless massless
            auto* pk = _binj.getMember(k);
            if (pk->mass>0.0) manager->interaction.calcSlowDownPertOne(_pert_out, _t_min_sq, _bini, *pk);
        }
    }
}
//! calculate slowdown factor for inner binary based on other particles and slowdown of system c.m.
/*!
@param[in] _bin: binary tree for calculating slowdown
*/
void calcSlowDownInnerBinary(BinaryTree<Tparticle>& _bin) {
    // inner perturbation from the binary itself
    _bin.slowdown.pert_in = manager->interaction.calcPertFromBinary(_bin);
    // outer perturbation: accumulate from the rest of the tree, then add the
    // root's own outer perturbation
    Float pert_out = 0.0;
    Float t_min_sq = NUMERIC_FLOAT_MAX;
    auto& bin_root = info.getBinaryTreeRoot();
    calcSlowDownPertInnerBinaryIter(pert_out, t_min_sq, _bin, bin_root);
    _bin.slowdown.pert_out = pert_out + bin_root.slowdown.pert_out;

#ifdef AR_SLOWDOWN_TIMESCALE
    // velocity dependent method
    //Float trv_ave = sdtdat.mtot/sqrt(sdtdat.mvor[0]*sdtdat.mvor[0] + sdtdat.mvor[1]*sdtdat.mvor[1] + sdtdat.mvor[2]*sdtdat.mvor[2]);
    // get min of velocity and force dependent values
    //Float t_min = std::min(trv_ave, sqrt(sdtdat.trf2_min));
    _bin.slowdown.timescale = std::min(_bin.slowdown.getTimescaleMax(), sqrt(t_min_sq));

    // stablility criterion
    // The slowdown factor should not make the system unstable, thus the Qst/Q set the limitation of the increasing of inner semi-major axis.
    if (_bin.stab>0 && _bin.stab != NUMERIC_FLOAT_MAX) {
        Float semi_amplify_max = std::max(Float(1.0),1.0/_bin.stab);
        Float period_amplify_max = pow(semi_amplify_max,3.0/2.0);
        Float timescale_stab = period_amplify_max*_bin.period;
        _bin.slowdown.timescale = std::min(_bin.slowdown.timescale, timescale_stab);
    }
#else
    _bin.slowdown.timescale = _bin.slowdown.getTimescaleMax();
#endif

    // only set slowdown if semi > 0 and stable
    bool set_sd_flag = true;
    if (_bin.semi>0) {
        // any unstable (stab>1) child tree disables slowdown
        for (int k=0; k<2; k++) {
            if (_bin.isMemberTree(k)) {
                auto* bink = _bin.getMemberAsTree(k);
                if (bink->stab>1.0) set_sd_flag = false;
            }
        }
    }
    else set_sd_flag = false;

    if (set_sd_flag) {
        _bin.slowdown.period = _bin.period;
        _bin.slowdown.calcSlowDownFactor();
    }
    else {
        // unbound or unstable: no slowdown applied
        _bin.slowdown.setSlowDownFactor(1.0);
    }
}
#endif // AR_SLOWDOWN_TREE || AR_SLOWDOWN_ARRAY
#ifdef AR_SLOWDOWN_TREE
//! Calculate twice (slowdown) kinetic energy iteration function with binary tree
/*! cumulative ekin_ and ekin_sd_. Notice these two values should be initialized to zero and reduce by two after iteration.
@param[in] _inv_nest_sd_up: upper inverse nested slowdown factor
@param[in] _bin: current binary tree for kick etot and calc dgt_drift
*/
void calcTwoEKinIter(const Float& _inv_nest_sd_up, AR::BinaryTree<Tparticle>& _bin){
    // nested slowdown factor at this level of the tree
    Float inv_nest_sd = _inv_nest_sd_up/_bin.slowdown.getSlowDownFactor();
    Float* vel_cm = _bin.getVel();
    for (int k=0; k<2; k++) {
        Float* vk;
        Float mk;
        if (_bin.isMemberTree(k)) {
            // child binary: recurse; its own members contribute to ekin_
            auto* bink = _bin.getMemberAsTree(k);
            vk = bink->getVel();
            mk = bink->mass;
            calcTwoEKinIter(inv_nest_sd, *bink);
        }
        else {
            // leaf particle: full (non-slowdown) kinetic term accumulates here
            auto* pk = _bin.getMember(k);
            vk = pk->getVel();
            mk = pk->mass;
            ekin_ += mk * (vk[0]*vk[0]+vk[1]*vk[1]+vk[2]*vk[2]);
        }
        // slowdown kinetic term uses the velocity relative to this binary's
        // c.m., weighted by the nested slowdown factor
        Float vrel[3] = {vk[0] - vel_cm[0],
                         vk[1] - vel_cm[1],
                         vk[2] - vel_cm[2]};
        ekin_sd_ += mk * inv_nest_sd * (vrel[0]*vrel[0] + vrel[1]*vrel[1] + vrel[2]*vrel[2]);
    }
}
//! Calculate (slowdown) kinetic energy
//! Calculate (slowdown) kinetic energy
// Accumulates twice the kinetic energies via calcTwoEKinIter, then halves.
void calcEKin() {
    ekin_ = ekin_sd_ = 0.0;
    auto& bin_root=info.getBinaryTreeRoot();
    Float sd_factor=1.0;
    calcTwoEKinIter(sd_factor, bin_root);
    Float* vcm = bin_root.getVel();
    // notice the cm velocity may not be zero after interruption, thus need to be added
    ekin_sd_ += bin_root.mass*(vcm[0]*vcm[0] + vcm[1]*vcm[1] + vcm[2]*vcm[2]);
    // iteration accumulated 2*E_kin; convert to E_kin
    ekin_ *= 0.5;
    ekin_sd_ *= 0.5;
}
//! kick velocity
/*! First time step will be calculated, the velocities are kicked
@param[in] _dt: time size
*/
void kickVel(const Float& _dt) {
    const int num = particles.getSize();
    Tparticle* pdat = particles.getDataAddress();
    Force* force = force_.getDataAddress();
    for (int i=0; i<num; i++) {
        // kick velocity by _dt * (inner acceleration + perturbation)
        // NOTE(review): the original "half dv" comment suggests callers pass
        // the half step here — confirm against the step sequence.
        Float* vel = pdat[i].getVel();
        Float* acc = force[i].acc_in;
        Float* pert= force[i].acc_pert;
        vel[0] += _dt * (acc[0] + pert[0]);
        vel[1] += _dt * (acc[1] + pert[1]);
        vel[2] += _dt * (acc[2] + pert[2]);
    }
    // update binary c.m. velocity interation
    updateBinaryVelIter(info.getBinaryTreeRoot());
}
//! drift position with slowdown tree
/*!
@param[in] _dt: drift time
@param[in] _vel_sd_up: upper cm sd vel
@param[in] _inv_nest_sd_up: upper inverse nested slowdown factor
@param[in] _bin: current binary to drift pos
*/
void driftPosTreeIter(const Float& _dt, const Float* _vel_sd_up, const Float& _inv_nest_sd_up, AR::BinaryTree<Tparticle>& _bin) {
    // current nested sd factor
    Float inv_nest_sd = _inv_nest_sd_up/_bin.slowdown.getSlowDownFactor();
    Float* vel_cm = _bin.getVel();
    // drift one member: build its slowdown velocity (relative velocity scaled
    // by the nested factor, plus the upper-level sd c.m. velocity) and
    // advance its position by _dt
    auto driftPos=[&](Float* pos, Float* vel, Float* vel_sd) {
        //scale velocity referring to binary c.m.
        vel_sd[0] = (vel[0] - vel_cm[0]) * inv_nest_sd + _vel_sd_up[0];
        vel_sd[1] = (vel[1] - vel_cm[1]) * inv_nest_sd + _vel_sd_up[1];
        vel_sd[2] = (vel[2] - vel_cm[2]) * inv_nest_sd + _vel_sd_up[2];
        pos[0] += _dt * vel_sd[0];
        pos[1] += _dt * vel_sd[1];
        pos[2] += _dt * vel_sd[2];
    };
    for (int k=0; k<2; k++) {
        if (_bin.isMemberTree(k)) {
            // child binary: drift its c.m., then recurse with this level's
            // sd velocity and factor
            auto* pj = _bin.getMemberAsTree(k);
            Float* pos = pj->getPos();
            Float* vel = pj->getVel();
            Float vel_sd[3];
            driftPos(pos, vel, vel_sd);
            driftPosTreeIter(_dt, vel_sd, inv_nest_sd, *pj);
            //#ifdef AR_DEBUG
            //  auto& pos1= pj->getLeftMember()->pos;
            //  auto& pos2= pj->getRightMember()->pos;
            //  Float m1 = pj->getLeftMember()->mass;
            //  Float m2 = pj->getRightMember()->mass;
            //  Float pos_cm[3] = {(m1*pos1[0]+m2*pos2[0])/(m1+m2),
            //                     (m1*pos1[1]+m2*pos2[1])/(m1+m2),
            //                     (m1*pos1[2]+m2*pos2[2])/(m1+m2)};
            //  Float dpos[3] = {pos[0]-pos_cm[0],
            //                   pos[1]-pos_cm[1],
            //                   pos[2]-pos_cm[2]};
            //  Float dr2 = dpos[0]*dpos[0]+dpos[1]*dpos[1]+dpos[2]*dpos[2];
            //  ASSERT(dr2<1e-10);
            //#endif
        }
        else {
            // leaf particle: drift directly
            auto* pj = _bin.getMember(k);
            Float* pos = pj->getPos();
            Float* vel = pj->getVel();
            Float vel_sd[3];
            driftPos(pos, vel, vel_sd);
        }
    }
}
//! drift time and position with slowdown tree
/*! First (real) time is drifted, then positions are drifted
@param[in] _dt: time step
*/
//! drift time and position with slowdown tree
/*! First (real) time is drifted, then positions are drifted
    @param[in] _dt: time step
 */
void driftTimeAndPos(const Float& _dt) {
    // drift time
    time_ += _dt;
    // the particle cm velocity is zero (assume in rest-frame)
    ASSERT(!particles.isOriginFrame());
    auto& bin_root=info.getBinaryTreeRoot();
    Float vel_cm[3] = {0.0,0.0,0.0};
    Float sd_factor=1.0;
    driftPosTreeIter(_dt, vel_cm, sd_factor, bin_root);
}
#ifdef AR_TIME_FUNCTION_MULTI_R
//! calc inverse R
//! inverse separation 1/|r1 - r2| of a particle pair
Float calcInvR(Tparticle& _p1, Tparticle& _p2) {
    const Float dx = _p1.pos[0] - _p2.pos[0];
    const Float dy = _p1.pos[1] - _p2.pos[1];
    const Float dz = _p1.pos[2] - _p2.pos[2];
    return 1/sqrt(dx*dx + dy*dy + dz*dz);
}
//! calc multiplied inverse R of binary tree
//! calc multiplied inverse R of binary tree
// Product of 1/R for this pair and, recursively, every nested pair.
// Bug fix: the loop referenced the undeclared identifier `_bini`;
// the parameter is `_bin`.
Float calcMultiInvRIter(AR::BinaryTree<Tparticle>& _bin){
    Float gt_kick_inv= calcInvR(*_bin.getLeftMember(), *_bin.getRightMember());
    for (int k=0; k<2; k++) {
        if (_bin.isMemberTree(k)) // tree - tree
            gt_kick_inv *= calcMultiInvRIter(*(_bin.getMemberAsTree(k)));
    }
    return gt_kick_inv;
}
//! calc gt_drift_inv for binary tree
//! calc gt_drift_inv for binary tree
// Recursive product of 1/R over all pairs, mirroring calcMultiInvRIter.
// Bug fixes vs. original: `calInvR` -> calcInvR (typo), `_bini` -> _bin
// (undeclared), and the accumulator was declared as `gt_drift_inv` but
// updated/returned through the undeclared name `gt_kick_inv`.
// NOTE(review): in MULTI_R mode drift and kick factors share this product
// form — confirm against the intended time-transformation function.
Float calcGtDriftInvIter(AR::BinaryTree<Tparticle>& _bin){
    Float gt_drift_inv= calcInvR(*_bin.getLeftMember(), *_bin.getRightMember());
    for (int k=0; k<2; k++) {
        if (_bin.isMemberTree(k)) // tree - tree
            gt_drift_inv *= calcGtDriftInvIter(*(_bin.getMemberAsTree(k)));
    }
    return gt_drift_inv;
}
#endif
//! calc force, potential and inverse time transformation factor for one pair of particles
/*!
@param[in] _inv_nest_sd: inverse nested slowdown factor
@param[in] _i: particle i index
@param[in] _j: particle j index
\return gt_kick_inv: inverse time transformation factor for kick
*/
Float calcAccPotAndGTKickInvTwo(const Float& _inv_nest_sd, const int _i, const int _j) {
    ASSERT(_i>=0&&_i<particles.getSize());
    ASSERT(_j>=0&&_j<particles.getSize());
    // calculate pair interaction
    Force fij[2];
    Float epotij;
    Float gt_kick_inv = manager->interaction.calcInnerAccPotAndGTKickInvTwo(fij[0], fij[1], epotij, particles[_i], particles[_j]);
    // scale binary pair force with slowdown
    force_[_i].acc_in[0] += fij[0].acc_in[0]*_inv_nest_sd;
    force_[_i].acc_in[1] += fij[0].acc_in[1]*_inv_nest_sd;
    force_[_i].acc_in[2] += fij[0].acc_in[2]*_inv_nest_sd;
    force_[_j].acc_in[0] += fij[1].acc_in[0]*_inv_nest_sd;
    force_[_j].acc_in[1] += fij[1].acc_in[1]*_inv_nest_sd;
    force_[_j].acc_in[2] += fij[1].acc_in[2]*_inv_nest_sd;
    // accumulate both the raw and the slowdown-scaled potential
    epot_ += epotij;
    epot_sd_ += epotij*_inv_nest_sd;
#ifdef AR_TTL
    // scale gtgrad with slowdown (gradient of the time-transformation
    // function, used by the TTL drift update)
    force_[_i].gtgrad[0] += fij[0].gtgrad[0]*_inv_nest_sd;
    force_[_i].gtgrad[1] += fij[0].gtgrad[1]*_inv_nest_sd;
    force_[_i].gtgrad[2] += fij[0].gtgrad[2]*_inv_nest_sd;
    force_[_j].gtgrad[0] += fij[1].gtgrad[0]*_inv_nest_sd;
    force_[_j].gtgrad[1] += fij[1].gtgrad[1]*_inv_nest_sd;
    force_[_j].gtgrad[2] += fij[1].gtgrad[2]*_inv_nest_sd;
#endif
    // pair contribution to the kick time-transformation factor, slowdown-scaled
    return gt_kick_inv * _inv_nest_sd;
}
//! calc force, potential and inverse time transformation factor for one particle by walking binary tree
/*!
@param[in] _inv_nest_sd: inverse nested slowdown factor
@param[in] _i: particle index
@param[in] _bin: binary tree for walking
\return gt_kick_inv: inverse time transformation factor for kick
*/
Float calcAccPotAndGTKickInvOneTreeIter(const Float& _inv_nest_sd, const int _i, AR::BinaryTree<Tparticle>& _bin) {
    // sum interactions between particle _i and every leaf of _bin
    Float gt_kick_inv=0.0;
    for (int k=0; k<2; k++) {
        if (_bin.isMemberTree(k)) // particle - tree
            gt_kick_inv += calcAccPotAndGTKickInvOneTreeIter(_inv_nest_sd, _i, *(_bin.getMemberAsTree(k)));
        else // particle - particle
            gt_kick_inv += calcAccPotAndGTKickInvTwo(_inv_nest_sd, _i, _bin.getMemberIndex(k));
    }
    return gt_kick_inv;
}
//! calc crossing force, potential and inverse time transformation factor between binary tree i and binary tree j
/*!
@param[in] _inv_nest_sd: inverse nested slowdown factor
@param[in] _bini: binary tree i for walking
@param[in] _binj: binary tree j for walking
\return gt_kick_inv: inverse time transformation factor for kick
*/
Float calcAccPotAndGTKickInvCrossTreeIter(const Float& _inv_nest_sd, AR::BinaryTree<Tparticle>& _bini, AR::BinaryTree<Tparticle>& _binj) {
    ASSERT(&_bini!=&_binj);
    // sum cross interactions between every leaf of _bini and every leaf of _binj
    Float gt_kick_inv=0.0;
    for (int k=0; k<2; k++) {
        if (_bini.isMemberTree(k)) // tree - tree
            gt_kick_inv += calcAccPotAndGTKickInvCrossTreeIter(_inv_nest_sd, *(_bini.getMemberAsTree(k)), _binj);
        else // particle - tree
            gt_kick_inv += calcAccPotAndGTKickInvOneTreeIter(_inv_nest_sd, _bini.getMemberIndex(k), _binj);
    }
    return gt_kick_inv;
}
//! calc force, potential and inverse time transformation factor for kick
/*!
@param[in] _inv_nest_sd_up: upper inverse nested slowdown factor
@param[in] _bin: current binary to drift pos
\return gt_kick_inv: inverse time transformation factor for kick
*/
Float calcAccPotAndGTKickInvTreeIter(const Float& _inv_nest_sd_up, AR::BinaryTree<Tparticle>& _bin) {
    // current nested sd factor
    Float inv_nest_sd = _inv_nest_sd_up/_bin.slowdown.getSlowDownFactor();
    Float gt_kick_inv = 0.0;
    // four cases by whether each child of _bin is a subtree or a particle;
    // each case accumulates inner interactions plus the cross term
    // check left
    if (_bin.isMemberTree(0)) { // left is tree
        auto* bin_left = _bin.getMemberAsTree(0);
        // inner interaction of left tree
        gt_kick_inv += calcAccPotAndGTKickInvTreeIter(inv_nest_sd, *bin_left);
        if (_bin.isMemberTree(1)) { // right is tree
            auto* bin_right = _bin.getMemberAsTree(1);
            // inner interaction of right tree
            gt_kick_inv += calcAccPotAndGTKickInvTreeIter(inv_nest_sd, *bin_right);
            // cross interaction
            gt_kick_inv += calcAccPotAndGTKickInvCrossTreeIter(inv_nest_sd, *bin_left, *bin_right);
        }
        else { // right is particle
            // cross interaction from particle j to tree left
            gt_kick_inv += calcAccPotAndGTKickInvOneTreeIter(inv_nest_sd, _bin.getMemberIndex(1), *bin_left);
        }
    }
    else { // left is particle
        if (_bin.isMemberTree(1)) { // right is tree
            auto* bin_right = _bin.getMemberAsTree(1);
            // inner interaction of right tree
            gt_kick_inv += calcAccPotAndGTKickInvTreeIter(inv_nest_sd, *bin_right);
            // cross interaction from particle i to tree right
            gt_kick_inv += calcAccPotAndGTKickInvOneTreeIter(inv_nest_sd, _bin.getMemberIndex(0), *bin_right);
        }
        else { // right is particle
            // particle - particle interaction
            gt_kick_inv += calcAccPotAndGTKickInvTwo(inv_nest_sd, _bin.getMemberIndex(0), _bin.getMemberIndex(1));
        }
    }
#ifdef AR_TIME_FUNCTION_MULTI_R
    // NOTE(review): this overwrites the sum accumulated above at EVERY
    // recursion level, so inner levels recompute the full product and their
    // accumulated value is discarded — confirm this is intended (it looks
    // wasteful at best).
    gt_kick_inv = calcMultiInvRIter(_bin);
#endif
    return gt_kick_inv;
}
//! calc force, potential and inverse time transformation factor for kick
/*!
\return gt_kick_inv: inverse time transformation factor for kick
*/
//! calc force, potential and inverse time transformation factor for kick
/*! Resets potentials and forces, walks the whole binary tree, then adds the
    external perturber force on top.
    \return gt_kick_inv: inverse time transformation factor for kick
 */
inline Float calcAccPotAndGTKickInv() {
    epot_ = 0.0;
    epot_sd_ = 0.0;
    for (int i=0; i<force_.getSize(); i++) force_[i].clear();
    Float gt_kick_inv = calcAccPotAndGTKickInvTreeIter(1.0, info.getBinaryTreeRoot());
    // pertuber force
    manager->interaction.calcAccPert(force_.getDataAddress(), particles.getDataAddress(), particles.getSize(), particles.cm, perturber, getTime());
    return gt_kick_inv;
}
#ifdef AR_TTL
        //! kick energy and time transformation function for drift of binary tree (iteration)
        /*! Recursively descends the binary tree, accumulating for each leaf particle
          the perturbation work (into etot_ref_/etot_sd_ref_) and the TTL
          gt_drift_inv change using the slowdown-corrected velocity.
          @param[in] _dt: time step
          @param[in] _vel_sd_up: slowdown-corrected velocity of the upper-level c.m.
          @param[in] _inv_nest_sd_up: inverse nested slowdown factor accumulated from upper levels
          @param[in] _bin: current binary tree node for kick etot and calc dgt_drift
          \return gt_drift_inv change
        */
        Float kickEtotAndGTDriftTreeIter(const Float& _dt, const Float* _vel_sd_up, const Float& _inv_nest_sd_up, AR::BinaryTree<Tparticle>& _bin) {
            // current nested sd factor: product of inverse factors along the tree path
            Float inv_nest_sd = _inv_nest_sd_up/_bin.slowdown.getSlowDownFactor();
            Float dgt_drift_inv = 0.0;
            Float de = 0.0;
            Float* vel_cm = _bin.getVel();
            for (int k=0; k<2; k++) {
                if (_bin.isMemberTree(k)) {
                    auto* bink = _bin.getMemberAsTree(k);
                    Float* vel = bink->getVel();
                    // slowdown-corrected velocity: only the component relative to this c.m. is rescaled
                    Float vel_sd[3] = {(vel[0] - vel_cm[0]) * inv_nest_sd + _vel_sd_up[0],
                                       (vel[1] - vel_cm[1]) * inv_nest_sd + _vel_sd_up[1],
                                       (vel[2] - vel_cm[2]) * inv_nest_sd + _vel_sd_up[2]};
                    dgt_drift_inv += kickEtotAndGTDriftTreeIter(_dt, vel_sd, inv_nest_sd, *bink);
                }
                else {
                    int i = _bin.getMemberIndex(k);
                    ASSERT(i>=0&&i<particles.getSize());
                    ASSERT(&particles[i]==_bin.getMember(k));
                    Float* gtgrad = force_[i].gtgrad;
                    Float* pert = force_[i].acc_pert;
                    Float* vel = particles[i].getVel();
                    Float vel_sd[3] = {(vel[0] - vel_cm[0]) * inv_nest_sd + _vel_sd_up[0],
                                       (vel[1] - vel_cm[1]) * inv_nest_sd + _vel_sd_up[1],
                                       (vel[2] - vel_cm[2]) * inv_nest_sd + _vel_sd_up[2]};
                    // work done by the perturbation on this particle (uses raw velocity)
                    de += particles[i].mass * (vel[0] * pert[0] +
                                               vel[1] * pert[1] +
                                               vel[2] * pert[2]);
#ifndef AR_TIME_FUNCTION_MULTI_R
                    // TTL gradient term uses the slowdown-corrected velocity
                    dgt_drift_inv += (vel_sd[0] * gtgrad[0] +
                                      vel_sd[1] * gtgrad[1] +
                                      vel_sd[2] * gtgrad[2]);
#endif
                }
            }
            etot_ref_ += _dt * de;
            etot_sd_ref_ += _dt * de;
            return dgt_drift_inv;
        }
//! kick energy and time transformation function for drift
/*!
@param[in] _dt: time step
*/
void kickEtotAndGTDrift(const Float _dt) {
// the particle cm velocity is zero (assume in rest-frame)
ASSERT(!particles.isOriginFrame());
auto& bin_root=info.getBinaryTreeRoot();
Float vel_cm[3] = {0.0,0.0,0.0};
Float sd_factor=1.0;
Float dgt_drift_inv = kickEtotAndGTDriftTreeIter(_dt, vel_cm, sd_factor, bin_root);
#ifdef AR_TIME_FUNCTION_MULTI_R
dgt_drift_inv = calcGTDriftInvIter(bin_root);
#endif
gt_drift_inv_ += dgt_drift_inv*_dt;
}
#else //! AR_TTL
//! kick energy
/*!
@param[in] _dt: time step
*/
inline void kickEtot(const Float _dt) {
Float de = 0.0;
const int num = particles.getSize();
Tparticle* pdat = particles.getDataAddress();
Force* force = force_.getDataAddress();
for (int i=0;i<num;i++) {
Float mass= pdat[i].mass;
Float* vel = pdat[i].getVel();
Float* pert= force[i].acc_pert;
de += mass * (vel[0] * pert[0] +
vel[1] * pert[1] +
vel[2] * pert[2]);
}
etot_sd_ref_ += _dt * de;
etot_ref_ += _dt * de;
}
#endif // AR_TTL
#else //! AR_SLOWDOWN_TREE
#ifdef AR_SLOWDOWN_ARRAY
        //! correct force, potential energy and gt_kick_inv based on slowdown for inner binaries
        /*! Each inner binary pair receives an additive correction of the form
          x*kappa_inv - x (remove the unscaled pair contribution, add the slowed one),
          then everything is rescaled by the global slowdown factor.
          @param[in,out] _gt_kick_inv: the inverse time transformation factor for kick step (input), be corrected with slowdown (output)
        */
        inline void correctAccPotGTKickInvSlowDownInner(Float& _gt_kick_inv) {
            int n = binary_slowdown.getSize();
            Float gt_kick_inv_cor = 0.0;
            Float de = 0.0;
            // index 0 holds the global (root) slowdown; inner binaries start at 1
            for (int i=1; i<n; i++) {
                auto& sdi = binary_slowdown[i];
                ASSERT(sdi!=NULL);
                int i1 = sdi->getMemberIndex(0);
                int i2 = sdi->getMemberIndex(1);
                Float kappa = sdi->slowdown.getSlowDownFactor();
                Float kappa_inv = 1.0/kappa;
                // NOTE(review): i1<0 presumably marks a non-particle (c.m.) member, so only
                // pure particle pairs are corrected here — confirm against BinaryTree semantics
                if (i1>=0) {
                    ASSERT(i2>=0);
                    ASSERT(i1!=i2);
                    ASSERT(i1<particles.getSize());
                    ASSERT(i2<particles.getSize());
                    // calculate pair interaction
                    Force fi[2];
                    Float epoti;
                    Float gt_kick_inv_i = manager->interaction.calcInnerAccPotAndGTKickInvTwo(fi[0], fi[1], epoti, particles[i1], particles[i2]);
                    // scale binary pair force with slowdown
                    force_[i1].acc_in[0] += fi[0].acc_in[0]*kappa_inv - fi[0].acc_in[0];
                    force_[i1].acc_in[1] += fi[0].acc_in[1]*kappa_inv - fi[0].acc_in[1];
                    force_[i1].acc_in[2] += fi[0].acc_in[2]*kappa_inv - fi[0].acc_in[2];
                    force_[i2].acc_in[0] += fi[1].acc_in[0]*kappa_inv - fi[1].acc_in[0];
                    force_[i2].acc_in[1] += fi[1].acc_in[1]*kappa_inv - fi[1].acc_in[1];
                    force_[i2].acc_in[2] += fi[1].acc_in[2]*kappa_inv - fi[1].acc_in[2];
                    de += epoti*kappa_inv - epoti;
                    // scale gtgrad with slowdown
#ifdef AR_TTL
                    force_[i1].gtgrad[0] += fi[0].gtgrad[0]*kappa_inv - fi[0].gtgrad[0];
                    force_[i1].gtgrad[1] += fi[0].gtgrad[1]*kappa_inv - fi[0].gtgrad[1];
                    force_[i1].gtgrad[2] += fi[0].gtgrad[2]*kappa_inv - fi[0].gtgrad[2];
                    force_[i2].gtgrad[0] += fi[1].gtgrad[0]*kappa_inv - fi[1].gtgrad[0];
                    force_[i2].gtgrad[1] += fi[1].gtgrad[1]*kappa_inv - fi[1].gtgrad[1];
                    force_[i2].gtgrad[2] += fi[1].gtgrad[2]*kappa_inv - fi[1].gtgrad[2];
#endif
                    // gt kick
                    gt_kick_inv_cor += gt_kick_inv_i*(kappa_inv - 1.0);
                }
            }
            // global slowdown: rescale the full force/gtgrad arrays and totals
            const Float kappa_inv_global = 1.0/binary_slowdown[0]->slowdown.getSlowDownFactor();
            for (int i=0; i<force_.getSize(); i++) {
                force_[i].acc_in[0] *= kappa_inv_global;
                force_[i].acc_in[1] *= kappa_inv_global;
                force_[i].acc_in[2] *= kappa_inv_global;
#ifdef AR_TTL
                force_[i].gtgrad[0] *= kappa_inv_global;
                force_[i].gtgrad[1] *= kappa_inv_global;
                force_[i].gtgrad[2] *= kappa_inv_global;
#endif
            }
            epot_sd_ = (epot_ + de)*kappa_inv_global;
            _gt_kick_inv = (_gt_kick_inv + gt_kick_inv_cor)*kappa_inv_global;
        }
//! correct postion drift due to inner binary slowdown
/*!
@param[in] _dt: time step
@param[in] _sd_global_inv: global slowdown factor inverse
*/
inline void correctPosSlowDownInner(const Float _dt, const Float _sd_global_inv) {
int n = binary_slowdown.getSize();
for (int i=1; i<n; i++) {
auto& sdi = binary_slowdown[i];
ASSERT(sdi!=NULL);
Float kappa = sdi->slowdown.getSlowDownFactor();
Float kappa_inv_m_one = (1.0/kappa - 1.0)*_sd_global_inv;
Float* velcm = sdi->getVel();
for (int k=0; k<2; k++) {
int j = sdi->getMemberIndex(k);
ASSERT(j>=0&&j<particles.getSize());
Float* pos = particles[j].getPos();
Float* vel = particles[j].getVel();
// only scale velocity referring to binary c.m.
Float vrel[3] = { vel[0] - velcm[0],
vel[1] - velcm[1],
vel[2] - velcm[2]};
pos[0] += _dt * vrel[0] * kappa_inv_m_one;
pos[1] += _dt * vrel[1] * kappa_inv_m_one;
pos[2] += _dt * vrel[2] * kappa_inv_m_one;
}
}
}
//! update c.m. for binaries with slowdown inner
inline void updateCenterOfMassForBinaryWithSlowDownInner() {
int n = binary_slowdown.getSize();
for (int i=1; i<n; i++) {
auto& sdi = binary_slowdown[i];
int i1 = sdi->getMemberIndex(0);
int i2 = sdi->getMemberIndex(1);
ASSERT(i1>=0&&i1<particles.getSize());
ASSERT(i2>=0&&i2<particles.getSize());
Float m1 = particles[i1].mass;
Float* pos1 = particles[i1].getPos();
Float* vel1 = particles[i1].getVel();
Float m2 = particles[i2].mass;
Float* pos2 = particles[i2].getPos();
Float* vel2 = particles[i2].getVel();
Float mcm = m1+m2;
// first obtain the binary c.m. velocity
Float mcminv = 1.0/mcm;
sdi->mass = mcm;
Float* pos = sdi->getPos();
pos[0] = (m1*pos1[0] + m2*pos2[0])*mcminv;
pos[1] = (m1*pos1[1] + m2*pos2[1])*mcminv;
pos[2] = (m1*pos1[2] + m2*pos2[2])*mcminv;
Float* vel = sdi->getVel();
vel[0] = (m1*vel1[0] + m2*vel2[0])*mcminv;
vel[1] = (m1*vel1[1] + m2*vel2[1])*mcminv;
vel[2] = (m1*vel1[2] + m2*vel2[2])*mcminv;
}
}
        //! calculate kinetic energy with slowdown factor
        /*! Corrects the plain kinetic energy by rescaling, for each inner binary,
          only the velocity component relative to the binary c.m., then applies
          the global slowdown factor; result stored in ekin_sd_.
          @param[in] _ekin: total kinetic energy without slowdown
        */
        inline void calcEkinSlowDownInner(const Float& _ekin) {
            int n = binary_slowdown.getSize();
            if (n==0) return;
            // slot 0 holds the global (root) slowdown
            const Float kappa_inv_global = 1.0/binary_slowdown[0]->slowdown.getSlowDownFactor();
            Float de = Float(0.0);
            for (int i=1; i<n; i++) {
                auto& sdi = binary_slowdown[i];
                ASSERT(sdi!=NULL);
                Float kappa = sdi->slowdown.getSlowDownFactor();
                Float kappa_inv_m_one = 1.0/kappa - 1.0;
                Float* velcm = sdi->getVel();
                for (int k=0; k<2; k++) {
                    int j = sdi->getMemberIndex(k);
                    ASSERT(j>=0&&j<particles.getSize());
                    Float* vel = particles[j].getVel();
                    // only scale velocity referring to binary c.m.
                    Float vrel[3] = { vel[0] - velcm[0],
                                      vel[1] - velcm[1],
                                      vel[2] - velcm[2]};
                    de += kappa_inv_m_one * particles[j].mass * (vrel[0]*vrel[0] + vrel[1]*vrel[1] + vrel[2]*vrel[2]);
                }
            }
            // 0.5 turns the accumulated m*v^2 correction into kinetic energy
            ekin_sd_ = (_ekin + 0.5*de)*kappa_inv_global;
        }
//! find inner binaries for slowdown treatment iteration function
static int findSlowDownInnerBinaryIter(COMM::List<AR::BinaryTree<Tparticle>*>& _binary_slowdown, const int& _c1, const int& _c2, AR::BinaryTree<Tparticle>& _bin) {
// find leaf binary
if (_bin.getMemberN()==2 && _bin.semi>0.0) {
_binary_slowdown.increaseSizeNoInitialize(1);
auto& sdi_new = _binary_slowdown.getLastMember();
sdi_new = &_bin;
return _c1+_c2+1;
}
else return _c1+_c2;
}
//! find inner binaries for slowdown treatment
/*! record binary tree address and set update time to _time
@param[in] _time: next slowdown update time
*/
void findSlowDownInner(const Float _time) {
auto& bin_root = info.getBinaryTreeRoot();
binary_slowdown.resizeNoInitialize(1);
int ncount[2]={0,0};
int nsd = bin_root.processTreeIter(binary_slowdown, ncount[0], ncount[1], findSlowDownInnerBinaryIter);
ASSERT(nsd==binary_slowdown.getSize()-1);
for (int i=1; i<=nsd; i++) {
#ifdef AR_SLOWDOWN_MASSRATIO
const Float mass_ratio = manager->slowdown_mass_ref/binary_slowdown[i].bin->mass;
#else
const Float mass_ratio = 1.0;
#endif
binary_slowdown[i]->slowdown.initialSlowDownReference(mass_ratio*manager->slowdown_pert_ratio_ref, manager->slowdown_timescale_max);
binary_slowdown[i]->slowdown.setUpdateTime(time_);
}
}
#endif // AR_SLOWDOWN_ARRAY
//! Calculate kinetic energy
inline void calcEKin(){
ekin_ = Float(0.0);
const int num = particles.getSize();
Tparticle* pdat = particles.getDataAddress();
for (int i=0; i<num; i++) {
const Float *vi=pdat[i].getVel();
ekin_ += 0.5 * pdat[i].mass * (vi[0]*vi[0]+vi[1]*vi[1]+vi[2]*vi[2]);
}
#ifdef AR_SLOWDOWN_ARRAY
calcEkinSlowDownInner(ekin_);
#endif
}
//! kick velocity
/*! First time step will be calculated, the velocities are kicked
@param[in] _dt: time size
*/
inline void kickVel(const Float _dt) {
const int num = particles.getSize();
Tparticle* pdat = particles.getDataAddress();
Force* force = force_.getDataAddress();
for (int i=0; i<num; i++) {
// kick velocity
Float* vel = pdat[i].getVel();
Float* acc = force[i].acc_in;
Float* pert= force[i].acc_pert;
// half dv
vel[0] += _dt * (acc[0] + pert[0]);
vel[1] += _dt * (acc[1] + pert[1]);
vel[2] += _dt * (acc[2] + pert[2]);
}
#ifdef AR_SLOWDOWN_ARRAY
// update c.m. of binaries
updateCenterOfMassForBinaryWithSlowDownInner();
#endif
}
//! drift time and position
/*! First (real) time is drifted, then positions are drifted
@param[in] _dt: time step
*/
inline void driftTimeAndPos(const Float _dt) {
// drift time
time_ += _dt;
// drift position
const int num = particles.getSize();
Tparticle* pdat = particles.getDataAddress();
#ifdef AR_SLOWDOWN_ARRAY
const Float kappa_inv = 1.0/binary_slowdown[0]->slowdown.getSlowDownFactor();
const Float dt_sd = _dt * kappa_inv;
for (int i=0; i<num; i++) {
Float* pos = pdat[i].getPos();
Float* vel = pdat[i].getVel();
pos[0] += dt_sd * vel[0];
pos[1] += dt_sd * vel[1];
pos[2] += dt_sd * vel[2];
}
// correct postion drift due to inner binary slowdown
correctPosSlowDownInner(_dt, kappa_inv);
#else
for (int i=0; i<num; i++) {
Float* pos = pdat[i].getPos();
Float* vel = pdat[i].getVel();
pos[0] += _dt * vel[0];
pos[1] += _dt * vel[1];
pos[2] += _dt * vel[2];
}
#endif
}
        //! calc force, potential and inverse time transformation factor for kick
        /*! Forces and potential are computed by the interaction manager; with
          AR_SLOWDOWN_ARRAY the result is then corrected for inner-binary slowdown.
          \return gt_kick_inv: inverse time transformation factor for kick
        */
        inline Float calcAccPotAndGTKickInv() {
            Float gt_kick_inv = manager->interaction.calcAccPotAndGTKickInv(force_.getDataAddress(), epot_, particles.getDataAddress(), particles.getSize(), particles.cm, perturber, getTime());
            //#ifdef AR_DEBUG
            //            // check c.m. force
            //            Force fcm;
            //            Float mcm=0.0;
            //            for (int i=0; i<particles.getSize(); i++) {
            //                for (int k=0; k<3; k++) {
            //                    fcm.acc_in[k] += particles[i].mass * force_[i].acc_in[k];
            //                }
            //                mcm += particles[i].mass;
            //            }
            //            for (int k=0; k<3; k++) {
            //                fcm.acc_in[k] /= mcm;
            //                ASSERT(abs(fcm.acc_in[k])<1e-10);
            //            }
            //#endif
#ifdef AR_SLOWDOWN_ARRAY
            // slowdown binary acceleration
            correctAccPotGTKickInvSlowDownInner(gt_kick_inv);
            //#ifdef AR_DEBUG
            //            // check c.m. force
            //            fcm.acc_in[0] = fcm.acc_in[1] = fcm.acc_in[2] = 0.0;
            //            for (int i=0; i<particles.getSize(); i++) {
            //                for (int k=0; k<3; k++) {
            //                    fcm.acc_in[k] += particles[i].mass * force_[i].acc_in[k];
            //                }
            //            }
            //            for (int k=0; k<3; k++) {
            //                fcm.acc_in[k] /= mcm;
            //                ASSERT(abs(fcm.acc_in[k])<1e-10);
            //            }
            //#endif
#endif
            return gt_kick_inv;
        }
#ifdef AR_TTL
        //! kick energy and time transformation function for drift
        /*! Accumulates perturbation work into etot_ref_ (and etot_sd_ref_ under
          AR_SLOWDOWN_ARRAY) and integrates the TTL gt_drift_inv_ using gtgrad.
          @param[in] _dt: time step
        */
        inline void kickEtotAndGTDrift(const Float _dt) {
            // de: work rate from perturbation; dg: change rate of the TTL function
            Float de = Float(0.0);
            Float dg = Float(0.0);
            const int num = particles.getSize();
            Tparticle* pdat = particles.getDataAddress();
            Force* force = force_.getDataAddress();
            for (int i=0;i<num;i++) {
                Float mass= pdat[i].mass;
                Float* vel = pdat[i].getVel();
                Float* pert= force[i].acc_pert;
                Float* gtgrad=force[i].gtgrad;
                de += mass * (vel[0] * pert[0] +
                              vel[1] * pert[1] +
                              vel[2] * pert[2]);
                dg += (vel[0] * gtgrad[0] +
                       vel[1] * gtgrad[1] +
                       vel[2] * gtgrad[2]);
            }
            etot_ref_ += _dt * de;
#ifdef AR_SLOWDOWN_ARRAY
            etot_sd_ref_ += _dt * de;
            // correct gt_drift_inv
            const Float kappa_inv_global = 1.0/binary_slowdown[0]->slowdown.getSlowDownFactor();
            int n = binary_slowdown.getSize();
            for (int i=1; i<n; i++) {
                auto& sdi = binary_slowdown[i];
                ASSERT(sdi!=NULL);
                Float kappa = sdi->slowdown.getSlowDownFactor();
                Float kappa_inv = 1.0/kappa;
                Float* velcm = sdi->getVel();
                for (int k=0; k<2; k++) {
                    int j = sdi->getMemberIndex(k);
                    if (j>=0) {
                        ASSERT(j<particles.getSize());
                        Float* gtgrad=force_[j].gtgrad;
                        Float* vel = particles[j].getVel();
                        // only the velocity relative to the binary c.m. is rescaled by slowdown
                        Float vrel[3] = { vel[0] - velcm[0],
                                          vel[1] - velcm[1],
                                          vel[2] - velcm[2]};
                        dg += (vrel[0] * (kappa_inv-1)* gtgrad[0] +
                               vrel[1] * (kappa_inv-1)* gtgrad[1] +
                               vrel[2] * (kappa_inv-1)* gtgrad[2]);
                    }
                }
            }
            gt_drift_inv_ += _dt * dg *kappa_inv_global;
#else
            gt_drift_inv_ += _dt * dg;
#endif
        }
#else //!AR_TTL
//! kick energy
/*!
@param[in] _dt: time step
*/
inline void kickEtot(const Float _dt) {
Float de = 0.0;
const int num = particles.getSize();
Tparticle* pdat = particles.getDataAddress();
Force* force = force_.getDataAddress();
for (int i=0;i<num;i++) {
Float mass= pdat[i].mass;
Float* vel = pdat[i].getVel();
Float* pert= force[i].acc_pert;
de += mass * (vel[0] * pert[0] +
vel[1] * pert[1] +
vel[2] * pert[2]);
}
#ifdef AR_SLOWDOWN_ARRAY
etot_sd_ref_ += _dt * de;
#endif
etot_ref_ += _dt * de;
}
#endif //AR_TTL
#endif // AR_SLOWDOWN_TREE
//! update slowdown velocity iteration function with binary tree
void updateBinaryVelIter(AR::BinaryTree<Tparticle>& _bin) {
_bin.vel[0]= _bin.vel[1] = _bin.vel[2] = 0.0;
Float mcm_inv = 1.0/_bin.mass;
for (int k=0; k<2; k++) {
if (_bin.isMemberTree(k)) {
auto* bink = _bin.getMemberAsTree(k);
updateBinaryVelIter(*bink);
_bin.vel[0] += bink->mass*bink->vel[0];
_bin.vel[1] += bink->mass*bink->vel[1];
_bin.vel[2] += bink->mass*bink->vel[2];
}
else {
auto* pk = _bin.getMember(k);
_bin.vel[0] += pk->mass*pk->vel[0];
_bin.vel[1] += pk->mass*pk->vel[1];
_bin.vel[2] += pk->mass*pk->vel[2];
}
}
_bin.vel[0] *= mcm_inv;
_bin.vel[1] *= mcm_inv;
_bin.vel[2] *= mcm_inv;
}
//! update binary cm iteratively
void updateBinaryCMIter(AR::BinaryTree<Tparticle>& _bin) {
_bin.pos[0]= _bin.pos[1] = _bin.pos[2] = 0.0;
_bin.vel[0]= _bin.vel[1] = _bin.vel[2] = 0.0;
Float mcm_member = 0.0;
for (int k=0; k<2; k++) {
if (_bin.isMemberTree(k)) {
auto* bink = _bin.getMemberAsTree(k);
updateBinaryCMIter(*bink);
mcm_member += bink->mass;
_bin.pos[0] += bink->mass*bink->pos[0];
_bin.pos[1] += bink->mass*bink->pos[1];
_bin.pos[2] += bink->mass*bink->pos[2];
_bin.vel[0] += bink->mass*bink->vel[0];
_bin.vel[1] += bink->mass*bink->vel[1];
_bin.vel[2] += bink->mass*bink->vel[2];
}
else {
auto* pk = _bin.getMember(k);
mcm_member += pk->mass;
_bin.pos[0] += pk->mass*pk->pos[0];
_bin.pos[1] += pk->mass*pk->pos[1];
_bin.pos[2] += pk->mass*pk->pos[2];
_bin.vel[0] += pk->mass*pk->vel[0];
_bin.vel[1] += pk->mass*pk->vel[1];
_bin.vel[2] += pk->mass*pk->vel[2];
}
}
Float mcm_inv = 1.0/mcm_member;
_bin.mass = mcm_member;
_bin.m1 = _bin.getLeftMember()->mass;
_bin.m2 = _bin.getRightMember()->mass;
_bin.vel[0] *= mcm_inv;
_bin.vel[1] *= mcm_inv;
_bin.vel[2] *= mcm_inv;
_bin.pos[0] *= mcm_inv;
_bin.pos[1] *= mcm_inv;
_bin.pos[2] *= mcm_inv;
}
//! set all binary c.m. mass to zero
void setBinaryCMZeroIter(AR::BinaryTree<Tparticle>& _bin) {
_bin.mass = 0.0;
for (int k=0; k<2; k++) {
if (_bin.isMemberTree(k)) {
auto* bink = _bin.getMemberAsTree(k);
setBinaryCMZeroIter(*bink);
}
}
}
//! update binary semi, ecc and period iteratively after 0.25 period for unstable systems
bool updateBinarySemiEccPeriodIter(AR::BinaryTree<Tparticle>& _bin, const Float& _G, const Float _time, const bool _check=false) {
bool check = _check;
for (int k=0; k<2; k++) {
if (_bin.isMemberTree(k)) {
auto* bink = _bin.getMemberAsTree(k);
check=updateBinarySemiEccPeriodIter(*bink, _G, _time, _check);
}
}
if ((_time>_bin.stab_check_time&&_bin.stab>1.0&&_bin.m1>0.0&&_bin.m2>0.0)||check) {
_bin.calcSemiEccPeriod(_G);
_bin.stab_check_time = _time + _bin.period;
return true;
}
return false;
}
public:
#ifdef AR_SLOWDOWN_TREE
        //! update slowdown factor based on perturbation and record slowdown energy change
        /*! Update slowdown inner and global.
          @param [in] _update_energy_flag: Record cumulative slowdown energy change if true;
          @param [in] _stable_check_flag: check whether the binary tree is stable if true;
        */
        void updateSlowDownAndCorrectEnergy(const bool _update_energy_flag, const bool _stable_check_flag) {
            auto& bin_root = info.getBinaryTreeRoot();
            auto& sd_root = bin_root.slowdown;
#ifdef AR_TTL
            // back up the old global factor to rescale gt_kick_inv_ when forces are not recomputed
            Float sd_backup = sd_root.getSlowDownFactor();
#endif
            // when the maximum inner slowdown is large, the outer should not be slowed down since the system may not be stable.
            //if (inner_sd_change_flag&&sd_org_inner_max<1000.0*manager->slowdown_pert_ratio_ref) sd_root.setSlowDownFactor(1.0);
            //if (time_>=sd_root.getUpdateTime()) {
            sd_root.pert_in = manager->interaction.calcPertFromBinary(bin_root);
            sd_root.pert_out = 0.0;
            Float t_min_sq= NUMERIC_FLOAT_MAX;
            manager->interaction.calcSlowDownPert(sd_root.pert_out, t_min_sq, getTime(), particles.cm, perturber);
            sd_root.timescale = std::min(sd_root.getTimescaleMax(), sqrt(t_min_sq));
            //Float period_amplify_max = NUMERIC_FLOAT_MAX;
            if (_stable_check_flag) {
                // check whether the system is stable for 10000 out period and the apo-center is below break criterion
                Float stab = bin_root.stableCheckIter(bin_root,10000*bin_root.period);
                Float apo = bin_root.semi*(1+bin_root.ecc);
                if (stab<1.0 && apo<info.r_break_crit) {
                    sd_root.period = bin_root.period;
                    sd_root.calcSlowDownFactor();
                }
                else sd_root.setSlowDownFactor(1.0);
                // stability criterion
                // The slowdown factor should not make the system unstable, thus the Qst/Q set the limitation of the increasing of inner semi-major axis.
                //if (stab>0 && stab != NUMERIC_FLOAT_MAX) {
                //    Float semi_amplify_max =  std::max(Float(1.0),1.0/stab);
                //    period_amplify_max = pow(semi_amplify_max,3.0/2.0);
                //}
            }
            else if (bin_root.semi>0) {
                sd_root.period = bin_root.period;
                sd_root.calcSlowDownFactor();
            }
            else sd_root.setSlowDownFactor(1.0);
            //sd_root.increaseUpdateTimeOnePeriod();
            //}
            // inner binary slowdown
            Float sd_org_inner_max = 0.0;
            bool inner_sd_change_flag=false;
            int n_bin = info.binarytree.getSize();
            // the last entry of binarytree is the root; only inner nodes are updated here
            for (int i=0; i<n_bin-1; i++) {
                auto& bini = info.binarytree[i];
                //if (time_>=bini.slowdown.getUpdateTime()) {
                bini.calcCenterOfMass();
                calcSlowDownInnerBinary(bini);
                //sdi->slowdown.increaseUpdateTimeOnePeriod();
                sd_org_inner_max = std::max(bini.slowdown.getSlowDownFactorOrigin(),sd_org_inner_max);
                inner_sd_change_flag=true;
                //}
            }
            if (_update_energy_flag) {
                // back up slowdown energies to measure the change from the new factors
                Float ekin_sd_bk = ekin_sd_;
                Float epot_sd_bk = epot_sd_;
                Float H_sd_bk = getHSlowDown();
                if(inner_sd_change_flag) {
                    // inner factors changed: forces must be fully recomputed
#ifdef AR_TTL
                    Float gt_kick_inv_new = calcAccPotAndGTKickInv();
                    gt_drift_inv_ += gt_kick_inv_new - gt_kick_inv_;
                    gt_kick_inv_ = gt_kick_inv_new;
#else
                    calcAccPotAndGTKickInv();
#endif
                    calcEKin();
                }
                else {
                    // only the global factor changed: rescale instead of recomputing
                    Float kappa_inv = 1.0/sd_root.getSlowDownFactor();
#ifdef AR_TTL
                    Float gt_kick_inv_new = gt_kick_inv_*sd_backup*kappa_inv;
                    gt_drift_inv_ += gt_kick_inv_new - gt_kick_inv_;
                    gt_kick_inv_ = gt_kick_inv_new;
#endif
                    ekin_sd_ = ekin_*kappa_inv;
                    epot_sd_ = epot_*kappa_inv;
                }
                Float de_sd = (ekin_sd_ - ekin_sd_bk) + (epot_sd_ - epot_sd_bk);
                etot_sd_ref_ += de_sd;
                Float dH_sd = getHSlowDown() - H_sd_bk;
                // add slowdown change to the global slowdown energy
                de_sd_change_cum_ += de_sd;
                dH_sd_change_cum_ += dH_sd;
            }
        }
#elif defined(AR_SLOWDOWN_ARRAY)
        //! update slowdown factor based on perturbation and record slowdown energy change
        /*! Update slowdown inner and global, update gt_inv.
          fix: use #elif defined(...) instead of #elif AR_SLOWDOWN_ARRAY — the latter
          evaluates the macro as an expression and breaks (or silently selects the
          wrong branch) when the macro is defined without a value; all sibling
          checks in this file use #ifdef semantics.
          @param [in] _update_energy_flag: Record cumulative slowdown energy change if true;
          @param [in] _stable_check_flag: check whether the binary tree is stable if true;
        */
        void updateSlowDownAndCorrectEnergy(const bool _update_energy_flag, const bool _stable_check_flag) {
            auto& bin_root = *binary_slowdown[0];
            auto& sd_root = bin_root.slowdown;
#ifdef AR_TTL
            // back up the old global factor to rescale gt_kick_inv_ when forces are not recomputed
            Float sd_backup = sd_root.getSlowDownFactor();
#endif
            // when the maximum inner slowdown is large, the outer should not be slowed down since the system may not be stable.
            //if (inner_sd_change_flag&&sd_org_inner_max<1000.0*manager->slowdown_pert_ratio_ref) sd_root.setSlowDownFactor(1.0);
            //if (time_>=sd_root.getUpdateTime()) {
            sd_root.pert_in = manager->interaction.calcPertFromBinary(bin_root);
            sd_root.pert_out = 0.0;
            Float t_min_sq= NUMERIC_FLOAT_MAX;
            manager->interaction.calcSlowDownPert(sd_root.pert_out, t_min_sq, getTime(), particles.cm, perturber);
            sd_root.timescale = std::min(sd_root.getTimescaleMax(), sqrt(t_min_sq));
            //Float period_amplify_max = NUMERIC_FLOAT_MAX;
            if (_stable_check_flag) {
                // check whether the system is stable for 10000 out period
                Float stab = bin_root.stableCheckIter(bin_root,10000*bin_root.period);
                if (stab<1.0) {
                    sd_root.period = bin_root.period;
                    sd_root.calcSlowDownFactor();
                }
                else sd_root.setSlowDownFactor(1.0);
                // stability criterion
                // The slowdown factor should not make the system unstable, thus the Qst/Q set the limitation of the increasing of inner semi-major axis.
                //if (stab>0) {
                //    Float semi_amplify_max =  std::max(Float(1.0),1.0/stab);
                //    period_amplify_max = pow(semi_amplify_max,3.0/2.0);
                //}
            }
            else if (bin_root.semi>0) {
                sd_root.period = bin_root.period;
                sd_root.calcSlowDownFactor();
            }
            else sd_root.setSlowDownFactor(1.0);
            //sd_root.increaseUpdateTimeOnePeriod();
            //}
            // inner binary slowdown
            int n = binary_slowdown.getSize();
            bool modified_flag=false;
            for (int i=1; i<n; i++) {
                auto* sdi = binary_slowdown[i];
                //if (time_>=sdi->slowdown.getUpdateTime()) {
                sdi->calcCenterOfMass();
                calcSlowDownInnerBinary(*sdi);
                //sdi->slowdown.increaseUpdateTimeOnePeriod();
                modified_flag=true;
                //}
            }
            if (_update_energy_flag) {
                // back up slowdown energies to measure the change from the new factors
                Float ekin_sd_bk = ekin_sd_;
                Float epot_sd_bk = epot_sd_;
                Float H_sd_bk = getHSlowDown();
                if (modified_flag) {
                    // initialize the gt_drift_inv_ with new slowdown factor
                    Float gt_kick_inv_sdi = manager->interaction.calcAccPotAndGTKickInv(force_.getDataAddress(), epot_, particles.getDataAddress(), particles.getSize(), particles.cm, perturber, getTime());
                    correctAccPotGTKickInvSlowDownInner(gt_kick_inv_sdi);
#ifdef AR_TTL
                    gt_drift_inv_ += gt_kick_inv_sdi - gt_kick_inv_;
                    gt_kick_inv_ = gt_kick_inv_sdi;
#endif
                    // correct etot_sd_ref_ with new slowdown
                    calcEkinSlowDownInner(ekin_);
                }
                else {
                    Float kappa_inv = 1.0/sd_root.getSlowDownFactor();
#ifdef AR_TTL
                    Float gt_kick_inv_sdi = gt_kick_inv_*sd_backup*kappa_inv;
                    gt_drift_inv_ += gt_kick_inv_sdi - gt_kick_inv_;
                    gt_kick_inv_ = gt_kick_inv_sdi;
#endif
                    // only need to correct the total value
                    ekin_sd_ = ekin_*kappa_inv;
                    epot_sd_ = epot_*kappa_inv;
                }
                Float de_sd = (ekin_sd_ - ekin_sd_bk) + (epot_sd_ - epot_sd_bk);
                etot_sd_ref_ += de_sd;
                Float dH_sd = getHSlowDown() - H_sd_bk;
                // add slowdown change to the global slowdown energy
                de_sd_change_cum_ += de_sd;
                dH_sd_change_cum_ += dH_sd;
            }
        }
#endif
        //! initialization for integration
        /*! initialize the system. Acceleration, energy and time transformation factors are updated. If the center-of-mass is not yet calculated, the system will be shifted to center-of-mass frame.
          @param[in] _time: real physical time to initialize
        */
        void initialIntegration(const Float _time) {
            ASSERT(checkParams());
            // particle number and data address
            const int n_particle = particles.getSize();
            // resize force array
            force_.resizeNoInitialize(n_particle);
            // Initial integration time t (avoid confusion of real time when slowdown is used)
            time_ = _time;
            // check particle number
            ASSERT(particles.getSize()>=2);
            // reset particle modification flag
            particles.setModifiedFalse();
            // check the center-of-mass initialization
            if(particles.isOriginFrame()) {
                particles.calcCenterOfMass();
                particles.shiftToCenterOfMassFrame();
                // shift recorded binary-tree c.m. data consistently with the particles
                for (int i=0; i<info.binarytree.getSize(); i++) {
                    auto& bin = info.binarytree[i];
                    bin.pos[0] -= particles.cm.pos[0];
                    bin.pos[1] -= particles.cm.pos[1];
                    bin.pos[2] -= particles.cm.pos[2];
                    bin.vel[0] -= particles.cm.vel[0];
                    bin.vel[1] -= particles.cm.vel[1];
                    bin.vel[2] -= particles.cm.vel[2];
                }
            }
            // the root c.m. should now be at rest at the origin
            ASSERT(info.getBinaryTreeRoot().pos[0]*info.getBinaryTreeRoot().pos[0]<1e-10);
            ASSERT(info.getBinaryTreeRoot().vel[0]*info.getBinaryTreeRoot().vel[0]<1e-10);
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
#ifdef AR_SLOWDOWN_TREE
            for (int i=0; i<info.binarytree.getSize(); i++)
                info.binarytree[i].slowdown.initialSlowDownReference(manager->slowdown_pert_ratio_ref, manager->slowdown_timescale_max);
#else
            // slot 0 of binary_slowdown always refers to the root binary
            binary_slowdown.increaseSizeNoInitialize(1);
            binary_slowdown[0] = &info.getBinaryTreeRoot();
            // set slowdown reference
            SlowDown& slowdown_root = info.getBinaryTreeRoot().slowdown;
            // slowdown for the system
            slowdown_root.initialSlowDownReference(manager->slowdown_pert_ratio_ref, manager->slowdown_timescale_max);
            if (particles.getSize()>2) {
                findSlowDownInner(time_);
                // update c.m. of binaries
                //updateCenterOfMassForBinaryWithSlowDownInner();
            }
#endif
            updateSlowDownAndCorrectEnergy(false,true);
#ifdef AR_TTL
            gt_kick_inv_ = calcAccPotAndGTKickInv();
            // initially gt_drift
            gt_drift_inv_ = gt_kick_inv_;
#else
            calcAccPotAndGTKickInv();
#endif
            calcEKin();
            etot_ref_ = ekin_ + epot_;
            etot_sd_ref_ = ekin_sd_ + epot_sd_;
            Float de_sd = etot_sd_ref_ - etot_ref_;
            // add slowdown change to the global slowdown energy
            de_sd_change_cum_ += de_sd;
            dH_sd_change_cum_ = 0.0;
#else // No SLOWDOWN
            Tparticle* particle_data = particles.getDataAddress();
            Force* force_data = force_.getDataAddress();
#ifdef AR_TTL
            gt_kick_inv_ = manager->interaction.calcAccPotAndGTKickInv(force_data, epot_, particle_data, n_particle, particles.cm, perturber, _time);
            // initially gt_drift
            gt_drift_inv_ = gt_kick_inv_;
#else
            manager->interaction.calcAccPotAndGTKickInv(force_data, epot_, particle_data, n_particle, particles.cm, perturber, _time);
#endif
            // calculate kinetic energy
            calcEKin();
            // initial total energy
            etot_ref_ = ekin_ + epot_;
#endif
        }
        //! integration for one step
        /*!
          @param[in] _ds: step size
          @param[out] _time_table: for high order symplectic integration, store the substep integrated (real) time, used for estimate the step for time synchronization, size should be consistent with step.getCDPairSize().
        */
        void integrateOneStep(const Float _ds, Float _time_table[]) {
            ASSERT(checkParams());
            ASSERT(!particles.isModified());
            ASSERT(_ds>0);
            // symplectic step coefficient group number
            const int nloop = manager->step.getCDPairSize();
            for (int i=0; i<nloop; i++) {
                // step for drift
                Float ds_drift = manager->step.getCK(i)*_ds;
                // inverse time transformation factor for drift
#ifdef AR_TTL
                Float gt_drift_inv = gt_drift_inv_;
#else
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
                Float gt_drift_inv = manager->interaction.calcGTDriftInv(ekin_sd_-etot_sd_ref_); // pt = -etot
#else
                Float gt_drift_inv = manager->interaction.calcGTDriftInv(ekin_-etot_ref_); // pt = -etot
#endif
#endif
                // drift
                Float dt_drift = ds_drift/gt_drift_inv;
                // drift time and position
                driftTimeAndPos(dt_drift);
                _time_table[i] = time_;
                // step for kick
                Float ds_kick = manager->step.getDK(i)*_ds;
                // calc force, potential and inverse time transformation factor for kick
                Float gt_kick_inv = calcAccPotAndGTKickInv();
                // time step for kick
                Float dt_kick = ds_kick/gt_kick_inv;
                // kick half step for velocity
                kickVel(0.5*dt_kick);
#ifdef AR_TTL
                // back up gt_kick
                gt_kick_inv_ = gt_kick_inv;
                // kick total energy and inverse time transformation factor for drift
                kickEtotAndGTDrift(dt_kick);
#else
                // kick total energy
                kickEtot(dt_kick);
#endif
                // kick half step for velocity
                kickVel(0.5*dt_kick);
                // calculate kinetic energy
                calcEKin();
            }
        }
        //! integration for two body one step
        /*! For the two-body problem the calculation can be much simplified to improve performance.
          Besides, the slow-down factor calculation is embedded in the Drift (for time) and Kick (for perturbation).
          @param[in] _ds: step size
          @param[out] _time_table: for high order symplectic integration, store the substep integrated (real) time, used for estimate the step for time synchronization, size should be consistent with step.getCDPairSize().
        */
        void integrateTwoOneStep(const Float _ds, Float _time_table[]) {
            ASSERT(checkParams());
            ASSERT(!particles.isModified());
            ASSERT(_ds>0);
            // symplectic step coefficient group number
            const int nloop = manager->step.getCDPairSize();
            const int n_particle = particles.getSize();
            ASSERT(n_particle==2);
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
            // the global slowdown factor is held fixed for the whole step
            const Float kappa_inv = 1.0/info.getBinaryTreeRoot().slowdown.getSlowDownFactor();
#endif
            Tparticle* particle_data = particles.getDataAddress();
            Float mass1 = particle_data[0].mass;
            Float* pos1 = particle_data[0].getPos();
            Float* vel1 = particle_data[0].getVel();
            Float mass2 = particle_data[1].mass;
            Float* pos2 = particle_data[1].getPos();
            Float* vel2 = particle_data[1].getVel();
            Force* force_data = force_.getDataAddress();
            Float* acc1 = force_data[0].acc_in;
            Float* pert1= force_data[0].acc_pert;
            Float* acc2 = force_data[1].acc_in;
            Float* pert2= force_data[1].acc_pert;
#ifdef AR_TTL
            Float* gtgrad1 = force_data[0].gtgrad;
            Float* gtgrad2 = force_data[1].gtgrad;
#endif
#ifdef AR_DEBUG_PRINT_DKD
            std::cout<<"K "<<time_<<" "
                     <<pos2[0]-pos1[0]<<" "<<pos2[1]-pos1[1]<<" "<<pos2[2]-pos1[2]<<" "
                     <<vel2[0]-vel1[0]<<" "<<vel2[1]-vel1[1]<<" "<<vel2[2]-vel1[2]<<" "
                     <<ekin_<<" "<<epot_<<" "<<etot_ref_<<std::endl;
#endif
            for (int i=0; i<nloop; i++) {
                // step for drift
                Float ds = manager->step.getCK(i)*_ds;
                // inverse time transformation factor for drift
#ifdef AR_TTL
                Float gt_inv = gt_drift_inv_;
#else
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
                Float gt_inv = manager->interaction.calcGTDriftInv(ekin_sd_-etot_sd_ref_); // pt = -etot_sd
#else
                Float gt_inv = manager->interaction.calcGTDriftInv(ekin_-etot_ref_); // pt = -etot
#endif
#endif
                // drift
                Float dt = ds/gt_inv;
                ASSERT(!ISNAN(dt));
                // drift time
                time_ += dt;
                // update real time
                _time_table[i] = time_;
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
                // positions drift over the slowed-down time interval
                Float dt_sd = dt*kappa_inv;
                // drift position
                pos1[0] += dt_sd * vel1[0];
                pos1[1] += dt_sd * vel1[1];
                pos1[2] += dt_sd * vel1[2];
                pos2[0] += dt_sd * vel2[0];
                pos2[1] += dt_sd * vel2[1];
                pos2[2] += dt_sd * vel2[2];
#else
                // drift position
                pos1[0] += dt * vel1[0];
                pos1[1] += dt * vel1[1];
                pos1[2] += dt * vel1[2];
                pos2[0] += dt * vel2[0];
                pos2[1] += dt * vel2[1];
                pos2[2] += dt * vel2[2];
#endif
                // step for kick
                ds = manager->step.getDK(i)*_ds;
                gt_inv = manager->interaction.calcAccPotAndGTKickInv(force_data, epot_, particle_data, n_particle, particles.cm, perturber, _time_table[i]);
                ASSERT(!ISNAN(epot_));
#ifdef AR_DEBUG_PRINT_DKD
                if (i>0)
                    std::cout<<"K "<<time_<<" "
                             <<pos2[0]-pos1[0]<<" "<<pos2[1]-pos1[1]<<" "<<pos2[2]-pos1[2]<<" "
                             <<vel2[0]-vel1[0]<<" "<<vel2[1]-vel1[1]<<" "<<vel2[2]-vel1[2]<<" "
                             <<ekin_<<" "<<epot_<<" "<<etot_ref_<<std::endl;
#endif
                // kick half step for velocity
                Float dvel1[3], dvel2[3];
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
                // time step for kick
                gt_inv *= kappa_inv;
                // NOTE: dt here is the HALF kick interval; energy/gt updates below use 2.0*dt
                dt = 0.5*ds/gt_inv;
                dvel1[0] = dt * (acc1[0]*kappa_inv + pert1[0]);
                dvel1[1] = dt * (acc1[1]*kappa_inv + pert1[1]);
                dvel1[2] = dt * (acc1[2]*kappa_inv + pert1[2]);
                dvel2[0] = dt * (acc2[0]*kappa_inv + pert2[0]);
                dvel2[1] = dt * (acc2[1]*kappa_inv + pert2[1]);
                dvel2[2] = dt * (acc2[2]*kappa_inv + pert2[2]);
#else
                dt = 0.5*ds/gt_inv;
                dvel1[0] = dt * (acc1[0] + pert1[0]);
                dvel1[1] = dt * (acc1[1] + pert1[1]);
                dvel1[2] = dt * (acc1[2] + pert1[2]);
                dvel2[0] = dt * (acc2[0] + pert2[0]);
                dvel2[1] = dt * (acc2[1] + pert2[1]);
                dvel2[2] = dt * (acc2[2] + pert2[2]);
#endif
                vel1[0] += dvel1[0];
                vel1[1] += dvel1[1];
                vel1[2] += dvel1[2];
                vel2[0] += dvel2[0];
                vel2[1] += dvel2[1];
                vel2[2] += dvel2[2];
#ifdef AR_DEBUG_PRINT_DKD
                std::cout<<"D "<<time_<<" "
                         <<pos2[0]-pos1[0]<<" "<<pos2[1]-pos1[1]<<" "<<pos2[2]-pos1[2]<<" "
                         <<vel2[0]-vel1[0]<<" "<<vel2[1]-vel1[1]<<" "<<vel2[2]-vel1[2]<<" "
                         <<ekin_<<" "<<epot_<<" "<<etot_ref_<<std::endl;
#endif
                // kick total energy and time transformation factor for drift
                // (2.0*dt recovers the full kick step, since dt is the half interval)
                etot_ref_ += 2.0*dt * (mass1* (vel1[0] * pert1[0] +
                                               vel1[1] * pert1[1] +
                                               vel1[2] * pert1[2]) +
                                       mass2* (vel2[0] * pert2[0] +
                                               vel2[1] * pert2[1] +
                                               vel2[2] * pert2[2]));
#ifdef AR_TTL
                // back up gt_kick_inv
                gt_kick_inv_ = gt_inv;
#if (defined AR_SLOWDOWN_ARRAY ) || (defined AR_SLOWDOWN_TREE)
                // integrate gt_drift_inv
                gt_drift_inv_ += 2.0*dt*kappa_inv*kappa_inv* (vel1[0] * gtgrad1[0] +
                                                              vel1[1] * gtgrad1[1] +
                                                              vel1[2] * gtgrad1[2] +
                                                              vel2[0] * gtgrad2[0] +
                                                              vel2[1] * gtgrad2[1] +
                                                              vel2[2] * gtgrad2[2]);
#else
                // integrate gt_drift_inv
                gt_drift_inv_ += 2.0*dt* (vel1[0] * gtgrad1[0] +
                                          vel1[1] * gtgrad1[1] +
                                          vel1[2] * gtgrad1[2] +
                                          vel2[0] * gtgrad2[0] +
                                          vel2[1] * gtgrad2[1] +
                                          vel2[2] * gtgrad2[2]);
#endif
#endif // AR_TTL
                // kick half step for velocity
                vel1[0] += dvel1[0];
                vel1[1] += dvel1[1];
                vel1[2] += dvel1[2];
                vel2[0] += dvel2[0];
                vel2[1] += dvel2[1];
                vel2[2] += dvel2[2];
                // calculate kinetic energy
                ekin_ = 0.5 * (mass1 * (vel1[0]*vel1[0]+vel1[1]*vel1[1]+vel1[2]*vel1[2]) +
                               mass2 * (vel2[0]*vel2[0]+vel2[1]*vel2[1]+vel2[2]*vel2[2]));
            }
#if (defined AR_SLOWDOWN_ARRAY ) || (defined AR_SLOWDOWN_TREE)
            // make consistent slowdown inner energy
            etot_sd_ref_ = etot_ref_*kappa_inv;
            ekin_sd_ = ekin_*kappa_inv;
            epot_sd_ = epot_*kappa_inv;
#endif
        }
//! Integrate the system to a given time
/*! Drive the main symplectic integration loop with adaptive step-size (ds)
  control, multi-level step backup/restore on large energy error, time
  synchronization near _time_end, and interrupt (merge/destroy) handling.
  @param[in] _time_end: the expected finishing time without offset
  \return binary tree of the pair which triggers interruption condition
*/
InterruptBinary<Tparticle> integrateToTime(const Float _time_end) {
ASSERT(checkParams());
// real full time step
const Float dt_full = _time_end - time_;
// time error
const Float time_error = manager->time_error_max;
// energy error limit
const Float energy_error_rel_max = manager->energy_error_relative_max;
// expect energy_error using half step if energy_error_rel_max reached
//const Float energy_error_rel_max_half_step = energy_error_rel_max * manager->step.calcErrorRatioFromStepModifyFactor(0.5);
//const Float dt_min = manager->time_step_min;
// backup data size
const int bk_data_size = getBackupDataSize();
Float backup_data[bk_data_size]; // for backup chain data
#ifdef AR_DEBUG_DUMP
Float backup_data_init[bk_data_size]; // for backup initial data
#endif
bool backup_flag=true; // flag for backup or restore
// time table
const int cd_pair_size = manager->step.getCDPairSize();
Float time_table[cd_pair_size]; // for storing sub-integrated time
// two switch step control
Float ds[2] = {info.ds,info.ds}; // step with a buffer
Float ds_init = info.ds; //backup initial step
int ds_switch=0; // 0 or 1
// reduce ds control, three level
const int n_reduce_level_max=10;
// Stack of previously-used (larger) ds values with per-level wait counters,
// so a temporarily reduced step can be restored after the counter expires.
struct DsBackupManager{
Float ds_backup[n_reduce_level_max+1];
int n_step_wait_recover_ds[n_reduce_level_max+1];
int n_reduce_level;
DsBackupManager(const Float _ds) { initial(_ds), n_reduce_level = -1; }
void initial(const Float _ds) {
for (int i=0; i<n_reduce_level_max; i++) {
ds_backup[i] = _ds;
n_step_wait_recover_ds[i] = 0;
}
}
// shift backup by one level, if successful, return true
bool shiftReduceLevel() {
if (n_reduce_level>0) {
for (int i=0; i<n_reduce_level-1; i++) {
ds_backup[i] =ds_backup[i+1];
n_step_wait_recover_ds[i] = n_step_wait_recover_ds[i+1];
}
n_reduce_level--;
return true;
}
return false;
}
// record ds to backup
void backup(const Float _ds, const Float _modify_factor) {
//if (n_reduce_level==n_reduce_level_max) shiftReduceLevel(); // not converse sometimes, becomes infinite small steps
if (n_reduce_level==n_reduce_level_max)
n_step_wait_recover_ds[n_reduce_level] += 2*to_int(1.0/_modify_factor);
else {
n_reduce_level++;
ds_backup[n_reduce_level] = _ds;
n_step_wait_recover_ds[n_reduce_level] = 2*to_int(1.0/_modify_factor);
}
}
// count step (return false) and recover ds if necessary (return true)
bool countAndRecover(Float &_ds, Float &_modify_factor, const bool _recover_flag) {
if (n_reduce_level>=0) {
if (n_step_wait_recover_ds[n_reduce_level] ==0) {
if (_recover_flag) {
_modify_factor = ds_backup[n_reduce_level]/_ds;
_ds = ds_backup[n_reduce_level];
n_step_wait_recover_ds[n_reduce_level] = -1;
n_reduce_level--;
return true;
}
}
else {
n_step_wait_recover_ds[n_reduce_level]--;
return false;
}
}
return false;
}
} ds_backup(info.ds);
int reduce_ds_count=0; // number of reduce ds (ignore first few steps)
Float step_modify_factor=1.0; // step modify factor
Float previous_step_modify_factor=1.0; // step modify factor
Float previous_error_ratio=-1; // previous error ratio when ds is reduced
bool previous_is_restore=false; // previous step is reduced or not
// time end control
int n_step_end=0; // number of steps integrated to reach the time end for one during the time synchronization sub steps
bool time_end_flag=false; // indicate whether time reach the end
// step count
long long unsigned int step_count=0; // integration step
long long unsigned int step_count_tsyn=0; // time synchronization step
InterruptBinary<Tparticle> bin_interrupt;
bin_interrupt.time_now=time_ + info.time_offset;
bin_interrupt.time_end=_time_end + info.time_offset;
InterruptBinary<Tparticle> bin_interrupt_return = bin_interrupt;
// particle data
const int n_particle = particles.getSize();
/* This must suppress since after findslowdowninner, slowdown inner is reset to 1.0, recalculate ekin_sdi give completely wrong value for energy correction for slowdown change later
#ifdef AR_DEBUG
Float ekin_check = ekin_;
calcEKin();
ASSERT(abs(ekin_check-ekin_)<1e-10);
ekin_ = ekin_check;
#endif
*/
// warning print flag
bool warning_print_once=true;
#ifdef AR_DEBUG_DUMP
// back up initial data
backupIntData(backup_data_init);
#endif
#ifdef AR_SLOWDOWN_ARRAY
// find new inner slowdown binaries, the binary tree data may be modified, thus it is safer to recheck slowdown inner binary at beginning to avoid memory issue (bin is pointer).
#ifdef AR_TTL
if (n_particle >2) {
int nold = binary_slowdown.getSize();
findSlowDownInner(time_);
int nnew = binary_slowdown.getSize();
// in case slowdown is disabled in the next step, gt_drift_inv_ should be re-initialized
if (nold>0&&nnew==0) {
gt_kick_inv_ = manager->interaction.calcAccPotAndGTKickInv(force_.getDataAddress(), epot_, particles.getDataAddress(), particles.getSize(), particles.cm, perturber, time_);
gt_drift_inv_ = gt_kick_inv_;
}
}
#else
if (n_particle >2) findSlowDownInner(time_);
#endif
#endif
#ifdef AR_SLOWDOWN_TREE
// update slowdown and correct slowdown energy and gt_inv
updateSlowDownAndCorrectEnergy(true, true);
#endif
// reset binary stab_check_time
for (int i=0; i<info.binarytree.getSize(); i++)
info.binarytree[i].stab_check_time = time_;
// integration loop
while(true) {
// backup data
bool binary_update_flag=false;
auto& bin_root = info.getBinaryTreeRoot();
auto& G = manager->interaction.gravitational_constant;
if(backup_flag) {
// check interrupt condition, ensure that time end not reach
if (manager->interrupt_detection_option>0 && !time_end_flag) {
bin_interrupt.time_now = time_ + info.time_offset;
bin_interrupt.time_end = _time_end + info.time_offset;
// calc perturbation energy
//Float epert=0.0;
//for (int i=0; i<n_particle; i++) {
//    epert += force_[i].pot_pert*particles[i].mass;
//}
manager->interaction.modifyAndInterruptIter(bin_interrupt, bin_root);
//InterruptBinary<Tparticle>* bin_intr_ptr = &bin_interrupt;
//bin_intr_ptr = bin_root.processRootIter(bin_intr_ptr, Tmethod::modifyAndInterruptIter);
ASSERT(bin_interrupt.checkParams());
if (bin_interrupt.status!=InterruptStatus::none) {
// the mode return back to the root scope
if (manager->interrupt_detection_option==2) {
// cumulative step count
profile.step_count = step_count;
profile.step_count_tsyn = step_count_tsyn;
profile.step_count_sum += step_count;
profile.step_count_tsyn_sum += step_count_tsyn;
return bin_interrupt;
}
else {
// check whether destroy appears (all masses becomes zero)
if (bin_interrupt.status==InterruptStatus::destroy) {
// all particles become zero masses
#ifdef AR_DEBUG
for (int j=0; j<n_particle; j++) {
ASSERT(particles[j].mass==0.0);
}
#endif
de_change_interrupt_ -= etot_ref_;
dH_change_interrupt_ -= getH();
ekin_ = epot_ = etot_ref_ = 0.0;
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
de_sd_change_cum_ -= etot_sd_ref_;
dH_sd_change_interrupt_ -= getHSlowDown();
ekin_sd_ = epot_sd_ = etot_sd_ref_ = 0.0;
#endif
#ifdef AR_DEBUG_PRINT
std::cerr<<"Interrupt condition triggered! Destroy";
std::cerr<<" Time: "<<time_;
bin_interrupt.adr->printColumnTitle(std::cerr);
std::cerr<<std::endl;
bin_interrupt.adr->printColumn(std::cerr);
std::cerr<<std::endl;
Tparticle::printColumnTitle(std::cerr);
std::cerr<<std::endl;
for (int j=0; j<2; j++) {
bin_interrupt.adr->getMember(j)->printColumn(std::cerr);
std::cerr<<std::endl;
}
#endif
// set binary tree mass to zero
setBinaryCMZeroIter(bin_root);
// cumulative step count
profile.step_count = step_count;
profile.step_count_tsyn = step_count_tsyn;
profile.step_count_sum += step_count;
profile.step_count_tsyn_sum += step_count_tsyn;
Float dt = _time_end - time_;
time_ += dt;
return bin_interrupt;
}
Float ekin_bk = ekin_;
Float epot_bk = epot_;
Float H_bk = getH();
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
Float ekin_sd_bk = ekin_sd_;
Float epot_sd_bk = epot_sd_;
Float H_sd_bk = getHSlowDown();
#endif
// update binary tree mass
info.generateBinaryTree(particles, G);
//updateBinaryCMIter(bin_root);
//updateBinarySemiEccPeriodIter(bin_root, G, time_, true);
binary_update_flag = true;
//bool stable_check=
//if (stable_check) bin_root.stableCheckIter(bin_root, 10000*bin_root.period);
// should do later, original mass still needed
//particles.cm.mass += bin_interrupt.dm;
#ifdef AR_TTL
Float gt_kick_inv_new = calcAccPotAndGTKickInv();
Float d_gt_kick_inv = gt_kick_inv_new - gt_kick_inv_;
// when the change is large, initialize gt_drift_inv_ to avoid large error
if (fabs(d_gt_kick_inv)/std::max(fabs(gt_kick_inv_),fabs(gt_kick_inv_new)) >1e-3)
gt_drift_inv_ = gt_kick_inv_new;
else
gt_drift_inv_ += d_gt_kick_inv;
gt_kick_inv_ = gt_kick_inv_new;
#else
calcAccPotAndGTKickInv();
#endif
// calculate kinetic energy
calcEKin();
// Notice initially etot_ref_ does not include epert. The perturbation effect is accumulated in the integration. Here instance change of mass does not create any work. So no need to add de_pert
// get perturbation energy change due to mass change
//Float epert_new = 0.0;
//for (int i=0; i<n_particle; i++) {
//    epert_new += force_[i].pot_pert*particles[i].mass;
//}
//Float de_pert = epert_new - epert; // notice this is double perturbation potential
// get energy change
Float de = (ekin_ - ekin_bk) + (epot_ - epot_bk); //+ de_pert;
etot_ref_ += de;
de_change_interrupt_ += de;
dH_change_interrupt_ += getH() - H_bk;
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
Float de_sd = (ekin_sd_ - ekin_sd_bk) + (epot_sd_ - epot_sd_bk);// + de_pert;
etot_sd_ref_ += de_sd;
Float dH_sd = getHSlowDown() - H_sd_bk;
// add slowdown change to the global slowdown energy
de_sd_change_interrupt_ += de_sd;
dH_sd_change_interrupt_ += dH_sd;
de_sd_change_cum_ += de_sd;
dH_sd_change_cum_ += dH_sd;
#endif //SLOWDOWN
#ifdef AR_DEBUG_PRINT
std::cerr<<"Interrupt condition triggered!";
std::cerr<<" Time: "<<time_;
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
std::cerr<<" Energy change: dE_SD: "<<de_sd<<" dH_SD: "<<dH_sd;
std::cerr<<" Slowdown: "<<bin_root.slowdown.getSlowDownFactor()<<std::endl;
#endif
bin_interrupt.adr->printColumnTitle(std::cerr);
std::cerr<<std::endl;
bin_interrupt.adr->printColumn(std::cerr);
std::cerr<<std::endl;
Tparticle::printColumnTitle(std::cerr);
std::cerr<<std::endl;
for (int j=0; j<2; j++) {
bin_interrupt.adr->getMember(j)->printColumn(std::cerr);
std::cerr<<std::endl;
}
#endif
// change fix step option to make safety if energy change is large
//info.fix_step_option=FixStepOption::none;
// if time_end flag set, reset it to be safety
//time_end_flag = false;
// check merger case
if (bin_interrupt.status==InterruptStatus::merge) {
// count particle having mass
int count_mass=0;
int index_mass_last=-1;
for (int j=0; j<n_particle; j++) {
if (particles[j].mass>0.0) {
count_mass++;
index_mass_last=j;
}
}
// only one particle has mass, drift directly
if (count_mass==1) {
ASSERT(index_mass_last<n_particle&&index_mass_last>=0);
auto& p = particles[index_mass_last];
Float dt = _time_end - time_;
p.pos[0] += dt * p.vel[0];
p.pos[1] += dt * p.vel[1];
p.pos[2] += dt * p.vel[2];
// cumulative step count
profile.step_count = step_count;
profile.step_count_tsyn = step_count_tsyn;
profile.step_count_sum += step_count;
profile.step_count_tsyn_sum += step_count_tsyn;
time_ += dt;
return bin_interrupt;
}
// if only two particles have mass, switch off auto ds adjustment
if (count_mass==2) {
info.fix_step_option=FixStepOption::later;
}
//else {
//    info.generateBinaryTree(particles, G);
//}
}
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
updateSlowDownAndCorrectEnergy(true, true);
#endif
info.ds = info.calcDsKeplerBinaryTree(*bin_interrupt.adr, manager->step.getOrder(), G, manager->ds_scale);
Float ds_max = manager->step.calcStepModifyFactorFromErrorRatio(2.0)*ds_init;
Float ds_min = manager->step.calcStepModifyFactorFromErrorRatio(0.5)*ds_init;
if (info.ds>ds_max || info.ds<ds_min) {
#ifdef AR_DEBUG_PRINT
std::cerr<<"Change ds after interruption: ds(init): "<<ds_init<<" ds(new): "<<info.ds<<" ds(now): "<<ds[0]<<std::endl;
#endif
ASSERT(info.ds>0);
ds[0] = std::min(ds[0], info.ds);
ds[1] = std::min(ds[1], info.ds);
ds_backup.initial(info.ds);
ds_init = info.ds;
}
else info.ds = ds_init;
// return one should be the top root
if (bin_interrupt_return.status!=InterruptStatus::none) {
if (bin_interrupt_return.adr!= bin_interrupt.adr) {
// give root address if interrupted binaries are different from previous one
bin_interrupt_return.adr = &(info.getBinaryTreeRoot());
}
if (bin_interrupt.status==InterruptStatus::merge)
bin_interrupt_return.status = InterruptStatus::merge;
}
else bin_interrupt_return = bin_interrupt;
}
bin_interrupt.clear();
}
}
// update binary orbit and ds if unstable
if (!time_end_flag&&!binary_update_flag) {
bool update_flag=updateBinarySemiEccPeriodIter(bin_root, G, time_);
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
updateSlowDownAndCorrectEnergy(true, true);
#endif
if (update_flag) {
// update slowdown and correct slowdown energy and gt_inv
#ifdef AR_DEBUG_PRINT
std::cerr<<"Update binary tree orbits, time= "<<time_<<"\n";
#endif
info.ds = info.calcDsKeplerBinaryTree(bin_root, manager->step.getOrder(), G, manager->ds_scale);
if (abs(ds_init-info.ds)/ds_init>0.1) {
#ifdef AR_DEBUG_PRINT
std::cerr<<"Change ds after update binary orbit: ds(init): "<<ds_init<<" ds(new): "<<info.ds<<" ds(now): "<<ds[0]<<std::endl;
#endif
ASSERT(info.ds>0);
ds[0] = std::min(ds[0], info.ds);
ds[1] = std::min(ds[1], info.ds);
ds_backup.initial(info.ds);
ds_init = info.ds;
}
}
}
int bk_return_size = backupIntData(backup_data);
ASSERT(bk_return_size == bk_data_size);
(void)bk_return_size;
}
else { //restore data
int bk_return_size = restoreIntData(backup_data);
ASSERT(bk_return_size == bk_data_size);
(void)bk_return_size;
//#ifdef AR_SLOWDOWN_ARRAY
// update c.m. of binaries
// binary c.m. is not backup, thus recalculate to get correct c.m. velocity for position drift correction due to slowdown inner (the first drift in integrateonestep assume c.m. vel is up to date)
//    updateCenterOfMassForBinaryWithSlowDownInner();
//#elif AR_SLOWDOWN_TREE
updateBinaryCMIter(info.getBinaryTreeRoot());
//#endif
}
// get real time
Float dt = time_;
// integrate one step
ASSERT(!ISINF(ds[ds_switch]));
if(n_particle==2) integrateTwoOneStep(ds[ds_switch], time_table);
else integrateOneStep(ds[ds_switch], time_table);
//info.generateBinaryTree(particles, G);
// real step size
dt = time_ - dt;
// ASSERT(dt>0.0);
step_count++;
// energy check
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
Float energy_error_bk = getEnergyErrorSlowDownFromBackup(backup_data);
Float etot_ref_bk = getEtotSlowDownRefFromBackup(backup_data);
Float energy_error = getEnergyErrorSlowDown();
Float H_bk = getHSlowDownFromBackup(backup_data);
Float H = getHSlowDown();
#else
Float energy_error_bk = getEnergyErrorFromBackup(backup_data);
Float etot_ref_bk = getEtotRefFromBackup(backup_data);
Float energy_error = getEnergyError();
Float H_bk = getHFromBackup(backup_data);
Float H = getH();
#endif
Float energy_error_diff = energy_error - energy_error_bk;
Float energy_error_rel_abs = abs(energy_error_diff/etot_ref_bk);
// get integration error for extended Hamiltonian
Float integration_error_rel_abs = abs(H-H_bk);
// H should be zero initially
Float integration_error_rel_cum_abs = abs(H);
Float integration_error_ratio = energy_error_rel_max/integration_error_rel_abs;
// time error
Float time_diff_rel = (_time_end - time_)/dt_full;
//! regular block time step modification factor
auto regularStepFactor = [](const Float _fac) {
Float fac = 1.0;
if (_fac<1) while (fac>_fac) fac *= 0.5;
else {
while (fac<=_fac) fac *= 2.0;
fac *= 0.5;
}
return fac;
};
Float error_increase_ratio_regular = manager->step.calcErrorRatioFromStepModifyFactor(2.0);
// error message print
auto printMessage = [&](const char* message) {
std::cerr<<message<<std::endl;
std::cerr<<"  T: "<<time_
<<"  dT_err/T: "<<time_diff_rel
<<"  ds: "<<ds[ds_switch]
<<"  ds_init: "<<ds_init
<<"  |Int_err/E|: "<<integration_error_rel_abs
<<"  |Int_err_cum/E|: "<<integration_error_rel_cum_abs
<<"  |dE/E|: "<<energy_error_rel_abs
<<"  dE_cum: "<<energy_error
<<"  Etot_sd: "<<etot_ref_bk
<<"  T_end_flag: "<<time_end_flag
<<"  Step_count: "<<step_count;
switch (info.fix_step_option) {
case FixStepOption::always:
std::cerr<<"  Fix: always"<<std::endl;
break;
case FixStepOption::later:
std::cerr<<"  Fix: later"<<std::endl;
break;
case FixStepOption::none:
std::cerr<<"  Fix: none"<<std::endl;
break;
default:
break;
}
};
#ifdef AR_COLLECT_DS_MODIFY_INFO
auto collectDsModifyInfo = [&](const char* error_message) {
std::cerr<<error_message<<": "
<<"time "<<time_<<" "
<<"ds_new "<<ds[1-ds_switch]<<" "
<<"ds_init "<<ds_init<<" "
<<"modify "<<step_modify_factor<<" "
<<"steps "<<step_count<<" "
<<"n_mods "<<reduce_ds_count<<" "
<<"err "<<integration_error_rel_abs<<" "
<<"err/max "<<1.0/integration_error_ratio<<" "
<<"errcum/E "<<integration_error_rel_cum_abs<<" "
<<"dt "<<dt<<" "
<<"n_ptcl "<<n_particle<<" ";
for (int i=0; i<info.binarytree.getSize(); i++) {
auto& bini = info.binarytree[i];
std::cerr<<"semi "<<bini.semi<<" "
<<"ecc "<<bini.ecc<<" "
<<"period "<<bini.period<<" "
<<"m1 "<<bini.m1<<" "
<<"m2 "<<bini.m2<<" "
<<"stab "<<bini.stab<<" "
<<"sd "<<bini.slowdown.getSlowDownFactor()<<" "
<<"sd_org "<<bini.slowdown.getSlowDownFactorOrigin()<<" "
<<"pert_in "<<bini.slowdown.getPertIn()<<" "
<<"pert_out "<<bini.slowdown.getPertOut()<<" ";
}
std::cerr<<std::endl;
};
#endif
//#ifdef AR_WARN
// warning for large number of steps
if(warning_print_once&&step_count>=manager->step_count_max) {
if(step_count%manager->step_count_max==0) {
printMessage("Warning: step count is signficiant large");
for (int i=0; i<info.binarytree.getSize(); i++){
auto& bin = info.binarytree[i];
std::cerr<<"  Binary["<<i<<"]: "
<<"  i1="<<bin.getMemberIndex(0)
<<"  i2="<<bin.getMemberIndex(1)
<<"  m1="<<bin.m1
<<"  m2="<<bin.m2
<<"  semi= "<<bin.semi
<<"  ecc= "<<bin.ecc
<<"  period= "<<bin.period
<<"  stab= "<<bin.stab
<<"  SD= "<<bin.slowdown.getSlowDownFactor()
<<"  SD_org= "<<bin.slowdown.getSlowDownFactorOrigin()
<<"  Tscale= "<<bin.slowdown.timescale
<<"  pert_in= "<<bin.slowdown.pert_in
<<"  pert_out= "<<bin.slowdown.pert_out;
std::cerr<<std::endl;
warning_print_once = false;
}
//printColumnTitle(std::cerr,20,info.binarytree.getSize());
//std::cerr<<std::endl;
//printColumn(std::cerr,20,info.binarytree.getSize());
//std::cerr<<std::endl;
#ifdef AR_DEBUG_DUMP
if (!info.dump_flag) {
DATADUMP("dump_large_step");
info.dump_flag=true;
}
#endif
//            // increase step size if energy error is small, not works correctly, suppress
//            if(integration_error_rel_abs<energy_error_rel_max) {
//                Float integration_error_ratio = energy_error_rel_max/integration_error_rel_abs;
//                Float step_modify_factor = manager->step.calcStepModifyFactorFromErrorRatio(integration_error_ratio);
//                ASSERT(step_modify_factor>0.0);
//                ds[ds_switch] *= step_modify_factor;
//                info.ds = ds[ds_switch];
//                ds[1-ds_switch] = ds[ds_switch];
//                ds_backup.initial(info.ds);
//                ds_init = info.ds;
//                ASSERT(!ISINF(ds[ds_switch]));
//#ifdef AR_DEBUG_PRINT
//                std::cerr<<"Energy error is small enough for increase step, integration_error_rel_abs="<<integration_error_rel_abs
//                         <<" energy_error_rel_max="<<energy_error_rel_max<<" step_modify_factor="<<step_modify_factor<<" new ds="<<ds[1-ds_switch]<<std::endl;
//#endif
//            }
}
}
//#endif
// When time synchronization steps too large, abort
if(step_count_tsyn>manager->step_count_max) {
printMessage("Error! step count after time synchronization is too large");
printColumnTitle(std::cerr,20,info.binarytree.getSize());
std::cerr<<std::endl;
printColumn(std::cerr,20,info.binarytree.getSize());
std::cerr<<std::endl;
//  restoreIntData(backup_data_init);
#ifdef AR_DEBUG_DUMP
if (!info.dump_flag) {
DATADUMP("dump_large_step");
info.dump_flag=true;
}
#endif
abort();
}
#ifdef AR_DEEP_DEBUG
printMessage("");
std::cerr<<"Timetable: ";
for (int i=0; i<cd_pair_size; i++) std::cerr<<" "<<time_table[manager->step.getSortCumSumCKIndex(i)];
std::cerr<<std::endl;
#endif
ASSERT(!ISNAN(integration_error_rel_abs));
// modify step if energy error is large
if(integration_error_rel_abs>energy_error_rel_max && info.fix_step_option!=FixStepOption::always) {
bool check_flag = true;
// check whether already modified
if (previous_step_modify_factor!=1.0) {
ASSERT(previous_error_ratio>0.0);
// if error does not reduce much, do not modify step anymore
if (integration_error_ratio>0.5*previous_error_ratio) check_flag=false;
}
if (check_flag) {
// for initial steps, reduce step permanently
if(step_count<5) {
// estimate the modification factor based on the symplectic order
// limit step_modify_factor to 0.125
step_modify_factor = std::max(regularStepFactor(manager->step.calcStepModifyFactorFromErrorRatio(integration_error_ratio)), Float(0.125));
ASSERT(step_modify_factor>0.0);
previous_step_modify_factor = step_modify_factor;
previous_error_ratio = integration_error_ratio;
ds[ds_switch] *= step_modify_factor;
ds[1-ds_switch] = ds[ds_switch];
// permanently reduce ds
//  info.ds = ds[ds_switch];
//  ASSERT(!ISINF(info.ds));
ds_backup.initial(info.ds);
backup_flag = false;
#ifdef AR_COLLECT_DS_MODIFY_INFO
collectDsModifyInfo("Large_energy_error");
#endif
continue;
}
// for big energy error, reduce step temporarily
else if (info.fix_step_option==FixStepOption::none) {
// estimate the modification factor based on the symplectic order
// limit step_modify_factor to 0.125
step_modify_factor = std::max(regularStepFactor(manager->step.calcStepModifyFactorFromErrorRatio(integration_error_ratio)), Float(0.125));
ASSERT(step_modify_factor>0.0);
previous_step_modify_factor = step_modify_factor;
previous_error_ratio = integration_error_ratio;
ds_backup.backup(ds[ds_switch], step_modify_factor);
if(previous_is_restore) reduce_ds_count++;
ds[ds_switch] *= step_modify_factor;
ds[1-ds_switch] = ds[ds_switch];
ASSERT(!ISINF(ds[ds_switch]));
// if multiple times reduction happens, permanently reduce ds
//if (reduce_ds_count>3) {
//    bool shift_flag = ds_backup.shiftReduceLevel();
//    if (!shift_flag) ds_backup.initial(ds[ds_switch]);
//    info.ds = ds_backup.ds_backup[0];
//    reduce_ds_count=0;
//}
backup_flag = false;
#ifdef AR_COLLECT_DS_MODIFY_INFO
collectDsModifyInfo("Large_energy_error");
#endif
continue;
}
}
}
// too much output
//#ifdef AR_WARN
//        if(integration_error_rel_abs>100.0*energy_error_rel_max) {
//            std::cerr<<"Warning: symplectic integrator error > 100*criterion:"<<integration_error_rel_abs<<std::endl;
//        }
//#endif
// if negative step, reduce step size
if(!time_end_flag&&dt<0) {
// limit step_modify_factor to 0.125
step_modify_factor = std::min(std::max(regularStepFactor(manager->step.calcStepModifyFactorFromErrorRatio(abs(_time_end/dt))), Float(0.0625)),Float(0.5));
ASSERT(step_modify_factor>0.0);
previous_step_modify_factor = step_modify_factor;
previous_error_ratio = integration_error_ratio;
ds[ds_switch] *= step_modify_factor;
ds[1-ds_switch] = ds[ds_switch];
ASSERT(!ISINF(ds[ds_switch]));
// for initial steps, reduce step permanently
if (step_count<5) {
//info.ds = ds[ds_switch];
ds_backup.initial(info.ds);
}
else { // reduce step temporarily
ds_backup.backup(ds[ds_switch], step_modify_factor);
}
backup_flag = false;
#ifdef AR_COLLECT_DS_MODIFY_INFO
collectDsModifyInfo("Negative_step");
#endif
continue;
//  std::cerr<<"Error! symplectic integrated time step ("<<dt<<") < minimum step ("<<dt_min<<")!\n";
//  printMessage();
//#ifdef AR_DEBUG_DUMP
//  DATADUMP("dump_negative_time");
//#endif
//  abort();
}
// if no modification, reset previous values
previous_step_modify_factor = 1.0;
previous_error_ratio = -1.0;
// check integration time
if(time_ < _time_end - time_error){
// step increase depend on n_step_wait_recover_ds
if(info.fix_step_option==FixStepOption::none && !time_end_flag) {
// waiting step count reach
previous_is_restore=ds_backup.countAndRecover(ds[1-ds_switch], step_modify_factor, integration_error_ratio>error_increase_ratio_regular);
if (previous_is_restore) {
//previous_error_ratio = -1;
//previous_step_modify_factor = 1.0;
#ifdef AR_COLLECT_DS_MODIFY_INFO
collectDsModifyInfo("Reuse_backup_ds");
#endif
}
// increase step size if energy error is small, not works correctly, integration error may not increase when ds becomes larger, then a very large ds may appear after several iterations. suppress
else if(integration_error_rel_abs<0.5*energy_error_rel_max && dt>0.0 && dt_full/dt>std::max(100.0,0.02*manager->step_count_max)) {
Float integration_error_ratio = energy_error_rel_max/integration_error_rel_abs;
Float step_modify_factor = std::min(Float(100.0),manager->step.calcStepModifyFactorFromErrorRatio(integration_error_ratio));
ASSERT(step_modify_factor>0.0);
ds[1-ds_switch] *= step_modify_factor;
info.ds = ds[1-ds_switch];
ASSERT(!ISINF(ds[1-ds_switch]));
#ifdef AR_DEBUG_PRINT
std::cerr<<"Energy error is small enough for increase step, integration_error_rel_abs="<<integration_error_rel_abs
<<" energy_error_rel_max="<<energy_error_rel_max<<" step_modify_factor="<<step_modify_factor<<" new ds="<<ds[1-ds_switch]<<std::endl;
#endif
}
}
// time synchronization case: when step size too small to reach time end, increase step size
if(time_end_flag && ds[ds_switch]==ds[1-ds_switch]) {
step_count_tsyn++;
Float dt_end = _time_end - time_;
if (dt<0) {
// limit step_modify_factor to 0.125
step_modify_factor = std::min(std::max(regularStepFactor(manager->step.calcStepModifyFactorFromErrorRatio(abs(_time_end/dt))), Float(0.0625)),Float(0.5));
ASSERT(step_modify_factor>0.0);
ds[ds_switch] *= step_modify_factor;
ds[1-ds_switch] = ds[ds_switch];
ASSERT(!ISINF(ds[ds_switch]));
}
else if (n_step_end>1 && dt<0.3*dt_end) {
// dt should be >0.0
// ASSERT(dt>0.0);
ds[1-ds_switch] = ds[ds_switch] * dt_end/dt;
ASSERT(!ISINF(ds[1-ds_switch]));
#ifdef AR_DEEP_DEBUG
std::cerr<<"Time step dt(real) "<<dt<<" <0.3*(time_end-time)(real) "<<dt_end<<" enlarge step factor: "<<dt_end/dt<<" new ds: "<<ds[1-ds_switch]<<std::endl;
#endif
}
else n_step_end++;
}
// when used once, update to the new step
ds[ds_switch] = ds[1-ds_switch];
ASSERT(!ISINF(ds[ds_switch]));
ds_switch = 1-ds_switch;
if (dt>0) backup_flag = true;
else backup_flag = false;
}
else if(time_ > _time_end + time_error) {
time_end_flag = true;
backup_flag = false;
step_count_tsyn++;
n_step_end=0;
// check timetable
int i=-1,k=0; // i indicate the increasing time index, k is the corresponding index in time_table
for(i=0; i<cd_pair_size; i++) {
k = manager->step.getSortCumSumCKIndex(i);
if(_time_end<time_table[k]) break;
}
if (i==0) { // first step case
ASSERT(time_table[k]>0.0);
ds[ds_switch] *= manager->step.getSortCumSumCK(i)*_time_end/time_table[k];
ds[1-ds_switch] = ds[ds_switch];
ASSERT(!ISINF(ds[ds_switch]));
#ifdef AR_DEEP_DEBUG
std::cerr<<"Time_end reach, time[k]= "<<time_table[k]<<" time= "<<time_<<" time_end/time[k]="<<_time_end/time_table[k]<<" CumSum_CK="<<manager->step.getSortCumSumCK(i)<<" ds(next) = "<<ds[ds_switch]<<" ds(next_next) = "<<ds[1-ds_switch]<<"\n";
#endif
}
else { // not first step case, get the interval time
// previous integrated sub time in time table
Float time_prev = time_table[manager->step.getSortCumSumCKIndex(i-1)];
Float dt_k = time_table[k] - time_prev;
Float ds_tmp = ds[ds_switch];
// get cumsum CK factor for two steps near the time_end
Float cck_prev = manager->step.getSortCumSumCK(i-1);
Float cck = manager->step.getSortCumSumCK(i);
// in case the time is between two sub step, first scale the next step with the previous step CumSum CK cck(i-1)
ASSERT(!ISINF(cck_prev));
ds[ds_switch] *= cck_prev;
ASSERT(!ISINF(ds[ds_switch]));
// then next next step, scale with the CumSum CK between two step: cck(i) - cck(i-1)
ASSERT(dt_k>0.0);
ds[1-ds_switch] = ds_tmp*(cck-cck_prev)*std::min(Float(1.0),(_time_end-time_prev+time_error)/dt_k);
ASSERT(!ISINF(ds[1-ds_switch]));
#ifdef AR_DEEP_DEBUG
std::cerr<<"Time_end reach, time_prev= "<<time_prev<<" time[k]= "<<time_table[k]<<" time= "<<time_<<" (time_end-time_prev)/dt="<<(_time_end-time_prev)/dt<<" CumSum_CK="<<cck<<" CumSum_CK(prev)="<<cck_prev<<" ds(next) = "<<ds[ds_switch]<<" ds(next_next) = "<<ds[1-ds_switch]<<" \n";
#endif
}
}
else {
#ifdef AR_DEEP_DEBUG
std::cerr<<"Finish, time_diff_rel = "<<time_diff_rel<<" integration_error_rel_abs = "<<integration_error_rel_abs<<std::endl;
#endif
//#ifdef AR_WARN
//  if (integration_error_rel_cum_abs>energy_error_rel_max) {
//      std::cerr<<"AR large energy error at the end! ";
//      printMessage();
//#ifdef AR_DEBUG_DUMP
////      restoreIntData(backup_data_init);
//      DATADUMP("dump_large_error");
//#endif
//  }
//#endif
break;
}
}
// cumulative step count
profile.step_count = step_count;
profile.step_count_tsyn = step_count_tsyn;
profile.step_count_sum += step_count;
profile.step_count_tsyn_sum += step_count_tsyn;
return bin_interrupt_return;
}
//! correct CM drift
/*! calculate c.m. and correct the member data to the c.m. frame.
This is used after the perturbation, in case the c.m. drift when members are in c.m. frame
*/
void correctCenterOfMassDrift() {
ASSERT(!particles.isOriginFrame());
Float mcm=0.0, pos_cm[3]={0.0,0.0,0.0}, vel_cm[3]={0.0,0.0,0.0};
auto* particle_data= particles.getDataAddress();
for (int i=0; i<particles.getSize(); i++) {
const Float *ri = particle_data[i].pos;
const Float *vi = particle_data[i].getVel();
const Float mi = particle_data[i].mass;
pos_cm[0] += ri[0] * mi;
pos_cm[1] += ri[1] * mi;
pos_cm[2] += ri[2] * mi;
vel_cm[0] += vi[0] * mi;
vel_cm[1] += vi[1] * mi;
vel_cm[2] += vi[2] * mi;
mcm += mi;
}
pos_cm[0] /= mcm;
pos_cm[1] /= mcm;
pos_cm[2] /= mcm;
vel_cm[0] /= mcm;
vel_cm[1] /= mcm;
vel_cm[2] /= mcm;
for (int i=0; i<particles.getSize(); i++) {
Float *ri = particle_data[i].pos;
Float *vi = particle_data[i].getVel();
ri[0] -= pos_cm[0];
ri[1] -= pos_cm[1];
ri[2] -= pos_cm[2];
vi[0] -= vel_cm[0];
vi[1] -= vel_cm[1];
vi[2] -= vel_cm[2];
}
}
#ifdef AR_SLOWDOWN_ARRAY
//! write back particles with slowdown velocity
/*! write back particles with slowdown velocity to original address
  @param[in] _particle_cm: center of mass particle to calculate the original frame, different from the particles.cm
*/
template <class Tptcl>
void writeBackSlowDownParticles(const Tptcl& _particle_cm) {
    ASSERT(particles.getMode()==COMM::ListMode::copy);
    ASSERT(!particles.isOriginFrame());
    auto* particle_adr = particles.getOriginAddressArray();
    auto* particle_data= particles.getDataAddress();
    // global slowdown factor from the outermost (index 0) slowdown binary
    const Float kappa_inv = 1.0/binary_slowdown[0]->slowdown.getSlowDownFactor();
    for (int i=0; i<particles.getSize(); i++) {
        //ASSERT(particle_adr[i]->mass == particle_data[i].mass);
        particle_adr[i]->mass = particle_data[i].mass;
        // shift position back to the original frame given by _particle_cm
        particle_adr[i]->pos[0] = particle_data[i].pos[0] + _particle_cm.pos[0];
        particle_adr[i]->pos[1] = particle_data[i].pos[1] + _particle_cm.pos[1];
        particle_adr[i]->pos[2] = particle_data[i].pos[2] + _particle_cm.pos[2];
        // scale velocity by the global inverse slowdown factor, then shift to original frame
        particle_adr[i]->vel[0] = particle_data[i].vel[0]*kappa_inv + _particle_cm.vel[0];
        particle_adr[i]->vel[1] = particle_data[i].vel[1]*kappa_inv + _particle_cm.vel[1];
        particle_adr[i]->vel[2] = particle_data[i].vel[2]*kappa_inv + _particle_cm.vel[2];
    }
    // correct inner slowdown velocity
    // inner binaries (index >= 1) carry their own slowdown factor; only the
    // velocity component relative to the binary c.m. is rescaled
    int nsd= binary_slowdown.getSize();
    for (int i=1; i<nsd; i++) {
        auto& sdi = binary_slowdown[i];
        ASSERT(sdi!=NULL);
        Float kappa = sdi->slowdown.getSlowDownFactor();
        // combined correction: (1/kappa - 1) relative term, scaled by the global kappa_inv
        Float kappa_inv_m_one = (1.0/kappa - 1.0)*kappa_inv;
        Float* velcm = sdi->getVel();
        for (int k=0; k<2; k++) {
            int j = sdi->getMemberIndex(k);
            ASSERT(j>=0&&j<particles.getSize());
            Float* vel = particle_data[j].getVel();
            // only scale velocity referring to binary c.m.
            Float vrel[3] = { vel[0] - velcm[0],
                              vel[1] - velcm[1],
                              vel[2] - velcm[2]};
            particle_adr[j]->vel[0] += vrel[0] * kappa_inv_m_one;
            particle_adr[j]->vel[1] += vrel[1] * kappa_inv_m_one;
            particle_adr[j]->vel[2] += vrel[2] * kappa_inv_m_one;
        }
    }
}
#endif
#ifdef AR_SLOWDOWN_TREE
//! write back slowdown particle iteration function
/*! Recursively descend the binary tree, accumulating the nested inverse
  slowdown factor, and write each leaf particle back to its original address
  with the slowdown-corrected velocity.
  @param[in] _particle_cm: center of mass particle to calculate the original frame, different from the particles.cm
  @param[in] _vel_sd_up: upper slowdown cm velocity
  @param[in] _inv_nest_sd_up: upper inverse nested slowdown factor
  @param[in] _bin: current binary
*/
template <class Tptcl>
void writeBackSlowDownParticlesIter(const Tptcl& _particle_cm, const Float* _vel_sd_up, const Float& _inv_nest_sd_up, AR::BinaryTree<Tparticle>& _bin) {
    // nested factor: product of inverse slowdown factors along the tree path
    Float inv_nest_sd = _inv_nest_sd_up/_bin.slowdown.getSlowDownFactor();
    Float* vel_cm = _bin.getVel();
    for (int k=0; k<2; k++) {
        if (_bin.isMemberTree(k)) {
            // inner node: compute the slowdown velocity of the sub-binary c.m.
            // and recurse with the deeper nested factor
            auto* bink = _bin.getMemberAsTree(k);
            Float* vel = bink->getVel();
            Float vel_sd[3] = {(vel[0] - vel_cm[0]) * inv_nest_sd + _vel_sd_up[0],
                               (vel[1] - vel_cm[1]) * inv_nest_sd + _vel_sd_up[1],
                               (vel[2] - vel_cm[2]) * inv_nest_sd + _vel_sd_up[2]};
            writeBackSlowDownParticlesIter(_particle_cm, vel_sd, inv_nest_sd, *bink);
        }
        else {
            // leaf: write mass, origin-frame position and slowdown-corrected
            // velocity back to the original particle address
            int i = _bin.getMemberIndex(k);
            auto& pk = particles[i];
            auto* pk_adr = particles.getMemberOriginAddress(i);
            Float* vel = pk.getVel();
            Float vel_sd[3] = {(vel[0] - vel_cm[0]) * inv_nest_sd + _vel_sd_up[0],
                               (vel[1] - vel_cm[1]) * inv_nest_sd + _vel_sd_up[1],
                               (vel[2] - vel_cm[2]) * inv_nest_sd + _vel_sd_up[2]};
            pk_adr->mass = pk.mass;
            pk_adr->pos[0] = pk.pos[0] + _particle_cm.pos[0];
            pk_adr->pos[1] = pk.pos[1] + _particle_cm.pos[1];
            pk_adr->pos[2] = pk.pos[2] + _particle_cm.pos[2];
            pk_adr->vel[0] = vel_sd[0] + _particle_cm.vel[0];
            pk_adr->vel[1] = vel_sd[1] + _particle_cm.vel[1];
            pk_adr->vel[2] = vel_sd[2] + _particle_cm.vel[2];
        }
    }
}
//! write back particles with slowdown velocity
/*! write back particles with slowdown velocity to original address
@param[in] _particle_cm: center of mass particle to calculate the original frame, different from the particles.cm
*/
template <class Tptcl>
void writeBackSlowDownParticles(const Tptcl& _particle_cm) {
//! iteration function using binarytree
auto& bin_root=info.getBinaryTreeRoot();
Float vel_cm[3] = {0.0,0.0,0.0};
Float sd_factor=1.0;
writeBackSlowDownParticlesIter(_particle_cm, vel_cm, sd_factor, bin_root);
}
#endif
//! write back particles to original address
/*! If particles are in center-of-mass frame, write back the particles in the
  original frame, without modifying the local copies (avoids roundoff error
  accumulating in the local data).
*/
template <class Tptcl>
void writeBackParticlesOriginFrame() {
    ASSERT(particles.getMode()==COMM::ListMode::copy);
    auto* adr_list = particles.getOriginAddressArray();
    auto* local_data = particles.getDataAddress();
    const int n = particles.getSize();
    if (particles.isOriginFrame()) {
        // already in origin frame: direct copy back
        for (int i=0; i<n; i++) *(Tptcl*)adr_list[i] = local_data[i];
    }
    else {
        // shift a temporary by the local c.m.; local copies stay untouched
        for (int i=0; i<n; i++) {
            Tptcl ptmp = local_data[i];
            for (int k=0; k<3; k++) {
                ptmp.pos[k] = local_data[i].pos[k] + particles.cm.pos[k];
                ptmp.vel[k] = local_data[i].vel[k] + particles.cm.vel[k];
            }
            *(Tptcl*)adr_list[i] = ptmp;
        }
    }
}
//! Get current physical time
/*! \return current physical time */
Float getTime() const { return time_; }
//! Get current kinetic energy
/*! \return current kinetic energy */
Float getEkin() const { return ekin_; }
//! Get current potential energy
/*! \return current potential energy (negative value for bounded systems) */
Float getEpot() const { return epot_; }
//! Get current total integrated energy
/*! \return total integrated (reference) energy */
Float getEtotRef() const { return etot_ref_; }
//! Get current total energy from ekin and epot
/*! \return sum of current kinetic and potential energy */
Float getEtot() const { return ekin_ + epot_; }
//! get perturbation potential energy
/*! \return perturbation potential energy */
Float getEpert() const {
    // sum of per-particle perturbation potential weighted by mass
    Float epert_sum = 0.0;
    const int n_particle = particles.getSize();
    for (int i=0; i<n_particle; i++) epert_sum += force_[i].pot_pert*particles[i].mass;
    return epert_sum;
}
//! get energy error
/*! \return energy error: (ekin + epot) - etot_ref */
Float getEnergyError() const { return ekin_ + epot_ - etot_ref_; }
//! get energy error from backup data
/*! layout (see backupIntData): [1]=etot_ref, [2]=ekin, [3]=epot */
Float getEnergyErrorFromBackup(Float* _bk) const { return -_bk[1] + _bk[2] + _bk[3]; }
//! get integrated energy from backup data
/*! layout (see backupIntData): [1]=etot_ref */
Float getEtotRefFromBackup(Float* _bk) const { return _bk[1]; }
//! get total energy from backup data (ekin+epot)
/*! layout (see backupIntData): [2]=ekin, [3]=epot */
Float getEtotFromBackup(Float* _bk) const { return _bk[2] + _bk[3]; }
//! reset cumulative energy/hamiltonian change due to interruption
void resetDEChangeBinaryInterrupt() {
    de_change_interrupt_ = dH_change_interrupt_ = 0.0;
}
//! get cumulative energy change due to interruption
Float getDEChangeBinaryInterrupt() const { return de_change_interrupt_; }
//! get cumulative hamiltonian change due to interruption
Float getDHChangeBinaryInterrupt() const { return dH_change_interrupt_; }
//! get Hamiltonian
/*! In the TTL case the time-transformed Hamiltonian is formed from the
  integrated quantities; otherwise it is delegated to the interaction class.
*/
Float getH() const {
#ifdef AR_TTL
    //return (ekin_ - etot_ref_)/gt_drift_inv_ + epot_/gt_kick_inv_;
    // time-transformed Hamiltonian: (ekin + epot - etot_ref) / gt_kick_inv
    return (ekin_ + epot_ - etot_ref_)/gt_kick_inv_;
#else
    return manager->interaction.calcH(ekin_ - etot_ref_, epot_);
#endif
}
//! get Hamiltonian from backup data
/*! Backup layout (see backupIntData): [1]=etot_ref, [2]=ekin, [3]=epot;
  gt_kick_inv is at [14] when slowdown data are compiled in, otherwise at [7].
*/
Float getHFromBackup(Float* _bk) const {
    Float& etot_ref =_bk[1];
    Float& ekin = _bk[2];
    Float& epot = _bk[3];
#ifdef AR_TTL
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
    //Float& gt_drift_inv = _bk[13];
    Float& gt_kick_inv = _bk[14];
#else
    //Float& gt_drift_inv = _bk[6];
    Float& gt_kick_inv = _bk[7];
#endif
    // consistent with getH()
    return (ekin + epot - etot_ref)/gt_kick_inv;
    //return (ekin - etot_ref)/gt_drift_inv + epot/gt_kick_inv;
#else
    return manager->interaction.calcH(ekin - etot_ref, epot);
#endif
}
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
//! reset cumulative energy/hamiltonian change due to slowdown change
void resetDESlowDownChangeCum() {
    de_sd_change_cum_ = dH_sd_change_cum_ = 0.0;
}
//! get cumulative energy change due to slowdown change
Float getDESlowDownChangeCum() const { return de_sd_change_cum_; }
//! get cumulative hamiltonian change due to slowdown change
Float getDHSlowDownChangeCum() const { return dH_sd_change_cum_; }
//! reset cumulative slowdown energy/hamiltonian change due to interruption
void resetDESlowDownChangeBinaryInterrupt() {
    de_sd_change_interrupt_ = dH_sd_change_interrupt_ = 0.0;
}
//! get cumulative slowdown energy change due to interruption
Float getDESlowDownChangeBinaryInterrupt() const { return de_sd_change_interrupt_; }
//! get cumulative slowdown hamiltonian change due to interruption
Float getDHSlowDownChangeBinaryInterrupt() const { return dH_sd_change_interrupt_; }
//! Get current kinetic energy with inner slowdown
/*! \return current kinetic energy with inner slowdown */
Float getEkinSlowDown() const { return ekin_sd_; }
//! Get current potential energy with inner slowdown
/*! \return current potential energy with inner slowdown (negative value for bounded systems) */
Float getEpotSlowDown() const { return epot_sd_; }
//! Get current total integrated energy with inner slowdown
/*! \return total integrated (reference) energy with inner slowdown */
Float getEtotSlowDownRef() const { return etot_sd_ref_; }
//! Get current total energy with inner slowdown from ekin_sd and epot_sd
/*! \return total energy with inner slowdown */
Float getEtotSlowDown() const { return ekin_sd_ + epot_sd_; }
//! get energy error with inner slowdown
/*! \return energy error with inner slowdown: (ekin_sd + epot_sd) - etot_sd_ref */
Float getEnergyErrorSlowDown() const { return ekin_sd_ + epot_sd_ - etot_sd_ref_; }
//! get energy error with inner slowdown from backup data
/*! layout (see backupIntData): [6]=etot_sd_ref, [7]=ekin_sd, [8]=epot_sd */
Float getEnergyErrorSlowDownFromBackup(Float* _bk) const { return -_bk[6] + _bk[7] + _bk[8]; }
//! get slowdown Hamiltonian
/*! Same form as getH() but using the slowdown energy components. */
Float getHSlowDown() const {
#ifdef AR_TTL
    //return (ekin_sd_ - etot_sd_ref_)/gt_drift_inv_ + epot_sd_/gt_kick_inv_;
    // time-transformed slowdown Hamiltonian
    return (ekin_sd_ + epot_sd_ - etot_sd_ref_)/gt_kick_inv_;
#else
    return manager->interaction.calcH(ekin_sd_ - etot_sd_ref_, epot_sd_);
#endif
}
//! get slowdown Hamiltonian from backup data
/*! Backup layout (see backupIntData): [6]=etot_sd_ref, [7]=ekin_sd,
  [8]=epot_sd, [14]=gt_kick_inv.
*/
Float getHSlowDownFromBackup(Float* _bk) const {
    Float& etot_sd_ref =_bk[6];
    Float& ekin_sd = _bk[7];
    Float& epot_sd = _bk[8];
#ifdef AR_TTL
    //Float& gt_drift_inv = _bk[13];
    Float& gt_kick_inv = _bk[14];
    //return (ekin_sd - etot_sd_ref)/gt_drift_inv + epot_sd/gt_kick_inv;
    // consistent with getHSlowDown()
    return (ekin_sd + epot_sd - etot_sd_ref)/gt_kick_inv;
#else
    return manager->interaction.calcH(ekin_sd - etot_sd_ref, epot_sd);
#endif
}
//! get integrated energy with inner slowdown from backup data
/*! layout (see backupIntData): [6]=etot_sd_ref */
Float getEtotSlowDownRefFromBackup(Float* _bk) const { return _bk[6]; }
//! get energy with inner slowdown from backup data (ekin_sd + epot_sd)
/*! layout (see backupIntData): [7]=ekin_sd, [8]=epot_sd */
Float getEtotSlowDownFromBackup(Float* _bk) const { return _bk[7] + _bk[8]; }
#endif
//! get backup data size
/*! Total number of Float entries produced by backupIntData:
  6 base values, +7 with slowdown, +2 with TTL, plus particle pos/vel data.
*/
int getBackupDataSize() const {
    int bk_size = 6;   // time, etot_ref, ekin, epot, de/dH interrupt change
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
    bk_size += 7;      // slowdown energies and cumulative changes
    //bk_size += SlowDown::getBackupDataSize();
#endif
#ifdef AR_TTL
    bk_size += 2;      // gt_drift_inv, gt_kick_inv
#endif
    bk_size += particles.getBackupDataSize();
    return bk_size;
}
//! Backup integration data
/*! Backup #time_, #etot_, #ekin_, $epot_, #gt_drift_, $gt_kick_inv_, #particles, $slowdown to one Float data array.
  The index layout must stay consistent with restoreIntData() and the
  get*FromBackup() accessors.
  \return backup array size
*/
int backupIntData(Float* _bk) {
    int bk_size=0;
    _bk[bk_size++] = time_;     //0
    _bk[bk_size++] = etot_ref_; //1
    _bk[bk_size++] = ekin_;     //2
    _bk[bk_size++] = epot_;     //3
    _bk[bk_size++] = de_change_interrupt_;  //4
    _bk[bk_size++] = dH_change_interrupt_;  //5
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
    _bk[bk_size++] = etot_sd_ref_;  //6
    _bk[bk_size++] = ekin_sd_;      //7
    _bk[bk_size++] = epot_sd_;      //8
    _bk[bk_size++] = de_sd_change_cum_;  //9
    _bk[bk_size++] = dH_sd_change_cum_;  //10
    _bk[bk_size++] = de_sd_change_interrupt_;  //11
    _bk[bk_size++] = dH_sd_change_interrupt_;  //12
#endif
#ifdef AR_TTL
    _bk[bk_size++] = gt_drift_inv_;  //13 / 6
    _bk[bk_size++] = gt_kick_inv_;   //14 / 7
#endif
    // particle positions and velocities follow the scalar block
    bk_size += particles.backupParticlePosVel(&_bk[bk_size]);
//#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
//    bk_size += info.getBinaryTreeRoot().slowdown.backup(&_bk[bk_size]); // slowdownfactor
//#endif
    return bk_size;
}
//! Restore integration data
/*! restore #time_, #etot_, #ekin_, $epot_, #gt_drift_, $gt_kick_inv_, #particles, $slowdown from one Float data array.
  Reads back exactly the layout written by backupIntData().
  \return backup array size
*/
int restoreIntData(Float* _bk) {
    int bk_size = 0;
    time_     = _bk[bk_size++];
    etot_ref_ = _bk[bk_size++];
    ekin_     = _bk[bk_size++];
    epot_     = _bk[bk_size++];
    de_change_interrupt_= _bk[bk_size++];
    dH_change_interrupt_= _bk[bk_size++];
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
    etot_sd_ref_ = _bk[bk_size++];
    ekin_sd_     = _bk[bk_size++];
    epot_sd_     = _bk[bk_size++];
    de_sd_change_cum_= _bk[bk_size++];
    dH_sd_change_cum_= _bk[bk_size++];
    de_sd_change_interrupt_= _bk[bk_size++];
    dH_sd_change_interrupt_= _bk[bk_size++];
#endif
#ifdef AR_TTL
    gt_drift_inv_ = _bk[bk_size++];
    gt_kick_inv_  = _bk[bk_size++];
#endif
    // particle positions and velocities follow the scalar block
    bk_size += particles.restoreParticlePosVel(&_bk[bk_size]);
//#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
//    bk_size += info.getBinaryTreeRoot().slowdown.restore(&_bk[bk_size]);
//#endif
    return bk_size;
}
#ifdef AR_TTL
//! Get integrated inverse time transformation factor
/*! In the TTF case it is obtained by integrating
  \f$ \frac{dg}{dt} = \sum_k \frac{\partial g}{\partial \vec{r_k}} \bullet \vec{v_k} \f$.
  Notice the last step is the sub-step in one symplectic loop.
  \return inverse time transformation factor for drift
*/
Float getGTDriftInv() const { return gt_drift_inv_; }
#endif
//! print group information
/*! Message, Number of members, time, binary tree printing iteration
  @param[in] _type: 0: new group (if pair id is same, no printing); 1: end group (always print and reset pair id)
  @param[in] _fout: FILE IO
  @param[in] _width: print width
  @param[in] _pcm: center of mass particle to calculate origin position and velocity, if NULL, assume cm pos and vel are zero
*/
template<class Tptcl>
void printGroupInfo(const int _type, std::ostream& _fout, const int _width, const Tptcl* _pcm=NULL) {
    auto& bin_root = info.getBinaryTreeRoot();
    //auto* p1 = bin_root.getLeftMember();
    //auto* p2 = bin_root.getRightMember();
    // end-of-group case with unbound (hyperbolic, separating) root orbit triggers a pair-id reset
    bool reset_flag = (_type==1 && bin_root.semi<0 && bin_root.ecca>0);
    if (info.checkAndSetBinaryPairIDIter(bin_root, reset_flag)) {
        if (_type==0) return; // if it is new but already existed binary, do not print
        else if (!reset_flag) return; // in the end case, if the system is still bound, do not print
    }
    // origin-frame c.m. position/velocity: local c.m. shifted by _pcm when given
    Float pos_cm[3], vel_cm[3];
    auto& pcm_loc = particles.cm;
    if (_pcm!=NULL) {
        pos_cm[0] = pcm_loc.pos[0] + _pcm->pos[0];
        pos_cm[1] = pcm_loc.pos[1] + _pcm->pos[1];
        pos_cm[2] = pcm_loc.pos[2] + _pcm->pos[2];
        vel_cm[0] = pcm_loc.vel[0] + _pcm->vel[0];
        vel_cm[1] = pcm_loc.vel[1] + _pcm->vel[1];
        vel_cm[2] = pcm_loc.vel[2] + _pcm->vel[2];
    }
    else {
        pos_cm[0] = pcm_loc.pos[0];
        pos_cm[1] = pcm_loc.pos[1];
        pos_cm[2] = pcm_loc.pos[2];
        vel_cm[0] = pcm_loc.vel[0];
        vel_cm[1] = pcm_loc.vel[1];
        vel_cm[2] = pcm_loc.vel[2];
    }
    // serialize output: several threads may print group info concurrently
#pragma omp critical
    {
        _fout<<std::setw(_width)<<_type
             <<std::setw(_width)<<bin_root.getMemberN()
             <<std::setw(_width)<<time_ + info.time_offset;
        _fout<<std::setw(_width)<<pos_cm[0]
             <<std::setw(_width)<<pos_cm[1]
             <<std::setw(_width)<<pos_cm[2]
             <<std::setw(_width)<<vel_cm[0]
             <<std::setw(_width)<<vel_cm[1]
             <<std::setw(_width)<<vel_cm[2];
        bin_root.printBinaryTreeIter(_fout, _width);
        _fout<<std::endl;
    }
    //if (_type==0) { // register pair id to avoid repeating printing
    //    p1->setBinaryPairID(p2->id);
    //    p2->setBinaryPairID(p1->id);
    //}
    //else { // break case reset pair id
    //    p1->setBinaryPairID(0);
    //    p2->setBinaryPairID(0);
    //}
}
//! print titles of class members using column style
/*! print titles of class members in one line for column style.
  The column order must stay in sync with printColumn().
  @param[out] _fout: std::ostream output object
  @param[in] _width: print width
  @param[in] _n_sd: slowdown inner group
*/
void printColumnTitle(std::ostream & _fout, const int _width=20, const int _n_sd=0) {
    _fout<<std::setw(_width)<<"Time"
         <<std::setw(_width)<<"dE"
         <<std::setw(_width)<<"Etot"
         <<std::setw(_width)<<"Ekin"
         <<std::setw(_width)<<"Epot"
         <<std::setw(_width)<<"Gt_drift"
         <<std::setw(_width)<<"H"
         <<std::setw(_width)<<"dE_intr"
         <<std::setw(_width)<<"dH_intr";
    perturber.printColumnTitle(_fout, _width);
    info.printColumnTitle(_fout, _width);
    profile.printColumnTitle(_fout, _width);
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
    _fout<<std::setw(_width)<<"dE_SD"
         <<std::setw(_width)<<"Etot_SD"
         <<std::setw(_width)<<"Ekin_SD"
         <<std::setw(_width)<<"Epot_SD"
         <<std::setw(_width)<<"dE_SDC_cum"
         <<std::setw(_width)<<"dH_SDC_cum"
         <<std::setw(_width)<<"dE_SDC_intr"
         <<std::setw(_width)<<"dH_SDC_intr";
    _fout<<std::setw(_width)<<"N_SD";
    // _n_sd slots for inner slowdown binaries (member indices + slowdown data)
    for (int i=0; i<_n_sd; i++) {
        _fout<<std::setw(_width)<<"I1"
             <<std::setw(_width)<<"I2";
        SlowDown::printColumnTitle(_fout, _width);
    }
#endif
    particles.printColumnTitle(_fout, _width);
}
//! print data of class members using column style
/*! print data of class members in one line for column style. Notice no newline is printed at the end.
  The column order must stay in sync with printColumnTitle().
  @param[out] _fout: std::ostream output object
  @param[in] _width: print width
  @param[in] _n_sd: slowdown inner group
*/
void printColumn(std::ostream & _fout, const int _width=20, const int _n_sd=0){
    _fout<<std::setw(_width)<<getTime()
         <<std::setw(_width)<<getEnergyError()
         <<std::setw(_width)<<etot_ref_
         <<std::setw(_width)<<ekin_
         <<std::setw(_width)<<epot_
    // time-transformation factor and Hamiltonian depend on compile options
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
#ifdef AR_TTL
         <<std::setw(_width)<<1.0/gt_drift_inv_
#else
         <<std::setw(_width)<<1.0/manager->interaction.calcGTDriftInv(ekin_sd_-etot_sd_ref_)
#endif
         <<std::setw(_width)<<getHSlowDown()
#else
#ifdef AR_TTL
         <<std::setw(_width)<<1.0/gt_drift_inv_
#else
         <<std::setw(_width)<<1.0/manager->interaction.calcGTDriftInv(ekin_-etot_ref_)
#endif
         <<std::setw(_width)<<getH()
#endif
         <<std::setw(_width)<<de_change_interrupt_
         <<std::setw(_width)<<dH_change_interrupt_;
    perturber.printColumn(_fout, _width);
    info.printColumn(_fout, _width);
    profile.printColumn(_fout, _width);
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
    _fout<<std::setw(_width)<<getEnergyErrorSlowDown()
         <<std::setw(_width)<<etot_sd_ref_
         <<std::setw(_width)<<ekin_sd_
         <<std::setw(_width)<<epot_sd_
         <<std::setw(_width)<<de_sd_change_cum_
         <<std::setw(_width)<<dH_sd_change_cum_
         <<std::setw(_width)<<de_sd_change_interrupt_
         <<std::setw(_width)<<dH_sd_change_interrupt_;
    // pad unused inner-slowdown slots with -1 indices and empty slowdown data
    SlowDown sd_empty;
#ifdef AR_SLOWDOWN_ARRAY
    int n_sd_now = binary_slowdown.getSize();
    _fout<<std::setw(_width)<<n_sd_now;
    for (int i=0; i<_n_sd; i++) {
        if (i<n_sd_now) {
            _fout<<std::setw(_width)<<binary_slowdown[i]->getMemberIndex(0)
                 <<std::setw(_width)<<binary_slowdown[i]->getMemberIndex(1);
            binary_slowdown[i]->slowdown.printColumn(_fout, _width);
        }
        else {
            _fout<<std::setw(_width)<<-1
                 <<std::setw(_width)<<-1;
            sd_empty.printColumn(_fout, _width);
        }
    }
#else
    int n_sd_now = info.binarytree.getSize();
    _fout<<std::setw(_width)<<n_sd_now;
    for (int i=0; i<_n_sd; i++) {
        if (i<n_sd_now) {
            _fout<<std::setw(_width)<<info.binarytree[i].getMemberIndex(0)
                 <<std::setw(_width)<<info.binarytree[i].getMemberIndex(1);
            info.binarytree[i].slowdown.printColumn(_fout, _width);
        }
        else {
            _fout<<std::setw(_width)<<-1
                 <<std::setw(_width)<<-1;
            sd_empty.printColumn(_fout, _width);
        }
    }
#endif
#endif
    particles.printColumn(_fout, _width);
}
//! write class data with BINARY format
/*! Layout must mirror readBinary(): scalar state, force array size + entries,
  then particles, perturber, info and profile.
  @param[in] _fout: file IO for write
*/
void writeBinary(FILE *_fout) {
    fwrite(&time_, sizeof(Float), 1, _fout);
    fwrite(&etot_ref_, sizeof(Float), 1, _fout);
    fwrite(&ekin_, sizeof(Float), 1, _fout);
    fwrite(&epot_, sizeof(Float), 1, _fout);
#ifdef AR_TTL
    // fixed mangled token ">_drift_inv_" (HTML-escape artifact of "&gt..."):
    // take the address of gt_drift_inv_
    fwrite(&gt_drift_inv_, sizeof(Float), 1, _fout);
#endif
    int size = force_.getSize();
    fwrite(&size, sizeof(int), 1, _fout);
    for (int i=0; i<size; i++) force_[i].writeBinary(_fout);
    particles.writeBinary(_fout);
    perturber.writeBinary(_fout);
    info.writeBinary(_fout);
    profile.writeBinary(_fout);
}
//! read class data with BINARY format and initialize the arrays
/*! Reads back exactly the layout produced by writeBinary(); aborts with a
  diagnostic on short reads or an invalid array size.
  @param[in] _fin: file IO for read
*/
void readBinary(FILE *_fin) {
    size_t rcount = fread(&time_, sizeof(Float), 1, _fin);
    rcount += fread(&etot_ref_, sizeof(Float), 1, _fin);
    rcount += fread(&ekin_, sizeof(Float), 1, _fin);
    rcount += fread(&epot_, sizeof(Float), 1, _fin);
    if (rcount<4) {
        std::cerr<<"Error: Data reading fails! requiring data number is 4, only obtain "<<rcount<<".\n";
        abort();
    }
#ifdef AR_TTL
    // fixed mangled token ">_drift_inv_" (HTML-escape artifact of "&gt..."):
    // read into the address of gt_drift_inv_
    rcount = fread(&gt_drift_inv_, sizeof(Float), 1, _fin);
    if (rcount<1) {
        std::cerr<<"Error: Data reading fails! requiring data number is 1, only obtain "<<rcount<<".\n";
        abort();
    }
#endif
    int size;
    rcount = fread(&size, sizeof(int),1, _fin);
    if(rcount<1) {
        std::cerr<<"Error: Data reading fails! requiring data number is 1, only obtain "<<rcount<<".\n";
        abort();
    }
    if(size<0) {
        // fixed garbled diagnostic ("array size <0 N<=0!")
        std::cerr<<"Error: invalid force array size "<<size<<" (must be >=0)!\n";
        abort();
    }
    if (size>0) {
        force_.setMode(COMM::ListMode::local);
        force_.reserveMem(size);
        force_.resizeNoInitialize(size);
        for (int i=0; i<size; i++) force_[i].readBinary(_fin);
    }
    particles.setMode(COMM::ListMode::local);
    particles.readBinary(_fin);
    perturber.readBinary(_fin);
    info.readBinary(_fin);
    profile.readBinary(_fin);
}
};
}
|
matrix_arithmetic.h | /***************************************************************************
* include/stxxl/bits/containers/matrix_arithmetic.h
*
* Part of the STXXL. See http://stxxl.sourceforge.net
*
* Copyright (C) 2010-2011 Raoul Steffen <R-Steffen@gmx.de>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
**************************************************************************/
#ifndef STXXL_CONTAINERS_MATRIX_ARITHMETIC_HEADER
#define STXXL_CONTAINERS_MATRIX_ARITHMETIC_HEADER
#include <stxxl/bits/mng/block_manager.h>
#include <stxxl/bits/containers/matrix_low_level.h>
STXXL_BEGIN_NAMESPACE
#ifndef STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS
#define STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS 3
#endif
#ifndef STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE
#define STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE 2
#endif
template <typename ValueType>
class column_vector;
template <typename ValueType>
class row_vector;
template <typename ValueType, unsigned BlockSideLength>
class swappable_block_matrix;
//! \addtogroup matrix
//! \{
//! Plain counter set for matrix block operations; supports + and - to
//! compute deltas between two snapshots.
struct matrix_operation_statistic_dataset
{
    int_type block_multiplication_calls,            // total block-multiply requests
        block_multiplications_saved_through_zero,   // multiplies skipped because an operand block was zero
        block_addition_calls,                       // total block-add requests
        block_additions_saved_through_zero;         // adds skipped because an operand block was zero
    //! all counters start at zero
    matrix_operation_statistic_dataset()
        : block_multiplication_calls(0),
          block_multiplications_saved_through_zero(0),
          block_addition_calls(0),
          block_additions_saved_through_zero(0) { }
    //! element-wise sum of two counter sets
    matrix_operation_statistic_dataset operator + (const matrix_operation_statistic_dataset& stat)
    {
        matrix_operation_statistic_dataset res(*this);
        res.block_multiplication_calls += stat.block_multiplication_calls;
        res.block_multiplications_saved_through_zero += stat.block_multiplications_saved_through_zero;
        res.block_addition_calls += stat.block_addition_calls;
        res.block_additions_saved_through_zero += stat.block_additions_saved_through_zero;
        return res;
    }
    //! element-wise difference of two counter sets
    matrix_operation_statistic_dataset operator - (const matrix_operation_statistic_dataset& stat)
    {
        matrix_operation_statistic_dataset res(*this);
        res.block_multiplication_calls -= stat.block_multiplication_calls;
        res.block_multiplications_saved_through_zero -= stat.block_multiplications_saved_through_zero;
        res.block_addition_calls -= stat.block_addition_calls;
        res.block_additions_saved_through_zero -= stat.block_additions_saved_through_zero;
        return res;
    }
};
//! Process-wide singleton holding the live matrix operation counters.
struct matrix_operation_statistic
    : public singleton<matrix_operation_statistic>, public matrix_operation_statistic_dataset
{ };
//! Snapshot of the statistic counters; convertible from the singleton and
//! combinable with + and - to measure the cost of a single operation.
struct matrix_operation_statistic_data : public matrix_operation_statistic_dataset
{
    //! by default capture the current global statistic
    matrix_operation_statistic_data(const matrix_operation_statistic& stat = * matrix_operation_statistic::get_instance())
        : matrix_operation_statistic_dataset(stat) { }
    matrix_operation_statistic_data(const matrix_operation_statistic_dataset& stat)
        : matrix_operation_statistic_dataset(stat) { }
    matrix_operation_statistic_data& operator = (const matrix_operation_statistic& stat)
    {
        return *this = matrix_operation_statistic_data(stat);
    }
    //! re-capture the current global statistic
    void set()
    { operator = (*matrix_operation_statistic::get_instance()); }
    matrix_operation_statistic_data operator + (const matrix_operation_statistic_data& stat)
    { return matrix_operation_statistic_data(matrix_operation_statistic_dataset(*this) + matrix_operation_statistic_dataset(stat)); }
    matrix_operation_statistic_data operator - (const matrix_operation_statistic_data& stat)
    { return matrix_operation_statistic_data(matrix_operation_statistic_dataset(*this) - matrix_operation_statistic_dataset(stat)); }
};
//! Human-readable dump of the statistic counters, including derived
//! "performed" counts (calls minus zero-block skips).
std::ostream& operator << (std::ostream& o, const matrix_operation_statistic_data& statsd)
{
    o << "matrix operation statistics" << std::endl;
    o << "block multiplication calls : "
      << statsd.block_multiplication_calls << std::endl;
    o << "block multiplications saved through zero blocks: "
      << statsd.block_multiplications_saved_through_zero << std::endl;
    o << "block multiplications performed : "
      << statsd.block_multiplication_calls - statsd.block_multiplications_saved_through_zero << std::endl;
    o << "block addition calls : "
      << statsd.block_addition_calls << std::endl;
    o << "block additions saved through zero blocks : "
      << statsd.block_additions_saved_through_zero << std::endl;
    o << "block additions performed : "
      << statsd.block_addition_calls - statsd.block_additions_saved_through_zero << std::endl;
    return o;
}
//! \}
//! matrix low-level operations and tools
namespace matrix_local {
//! A static_quadtree holds 4^Level elements arranged in a quad tree.
//!
//! Static quad trees are useful for recursive algorithms with fixed depth
//! that partition the in- and output and perform pre- and postcalculations on the partitions.
//! The four children of one node are denoted as ul (up left), ur (up right), dl (down left), and dr (down right).
template <typename ValueType, unsigned Level>
struct static_quadtree
{
    typedef static_quadtree<ValueType, Level - 1> smaller_static_quadtree;
    // the four quadrants, each a quadtree one level smaller
    smaller_static_quadtree ul, ur, dl, dr;
    static_quadtree(smaller_static_quadtree ul, smaller_static_quadtree ur,
                    smaller_static_quadtree dl, smaller_static_quadtree dr)
        : ul(ul), ur(ur), dl(dl), dr(dr) { }
    static_quadtree() { }
    // all operators apply element-wise to the four quadrants
    static_quadtree& operator &= (const static_quadtree& right)
    {
        ul &= right.ul, ur &= right.ur;
        dl &= right.dl, dr &= right.dr;
        return *this;
    }
    static_quadtree& operator += (const static_quadtree& right)
    {
        ul += right.ul, ur += right.ur;
        dl += right.dl, dr += right.dr;
        return *this;
    }
    static_quadtree& operator -= (const static_quadtree& right)
    {
        ul -= right.ul, ur -= right.ur;
        dl -= right.dl, dr -= right.dr;
        return *this;
    }
    static_quadtree operator & (const static_quadtree& right) const
    { return static_quadtree(ul & right.ul, ur & right.ur, dl & right.dl, dr & right.dr); }
    static_quadtree operator + (const static_quadtree& right) const
    { return static_quadtree(ul + right.ul, ur + right.ur, dl + right.dl, dr + right.dr); }
    static_quadtree operator - (const static_quadtree& right) const
    { return static_quadtree(ul - right.ul, ur - right.ur, dl - right.dl, dr - right.dr); }
};
//! Base case of the recursion: a Level-0 quadtree is a single value,
//! implicitly convertible to and from ValueType.
template <typename ValueType>
struct static_quadtree<ValueType, 0>
{
    ValueType val;
    static_quadtree(const ValueType& v)
        : val(v) { }
    static_quadtree() { }
    operator const ValueType& () const
    { return val; }
    operator ValueType& ()
    { return val; }
    // operators forward directly to the underlying value
    static_quadtree& operator &= (const static_quadtree& right)
    {
        val &= right.val;
        return *this;
    }
    static_quadtree& operator += (const static_quadtree& right)
    {
        val += right.val;
        return *this;
    }
    static_quadtree& operator -= (const static_quadtree& right)
    {
        val -= right.val;
        return *this;
    }
    static_quadtree operator ! () const
    { return static_quadtree(! val); }
    static_quadtree operator & (const static_quadtree& right) const
    { return val & right.val; }
    static_quadtree operator + (const static_quadtree& right) const
    { return val + right.val; }
    static_quadtree operator - (const static_quadtree& right) const
    { return val - right.val; }
};
template <typename ValueType, unsigned BlockSideLength, unsigned Level, bool AExists, bool BExists>
struct feedable_strassen_winograd
{
typedef static_quadtree<bool, Level> zbt; // true <=> is a zero-block
typedef static_quadtree<ValueType, Level> vt;
typedef feedable_strassen_winograd<ValueType, BlockSideLength, Level - 1, AExists, BExists> smaller_feedable_strassen_winograd_ab;
typedef feedable_strassen_winograd<ValueType, BlockSideLength, Level - 1, AExists, false> smaller_feedable_strassen_winograd_a;
typedef feedable_strassen_winograd<ValueType, BlockSideLength, Level - 1, false, BExists> smaller_feedable_strassen_winograd_b;
typedef feedable_strassen_winograd<ValueType, BlockSideLength, Level - 1, false, false> smaller_feedable_strassen_winograd_n;
typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
typedef typename swappable_block_matrix_type::block_scheduler_type block_scheduler_type;
typedef typename block_scheduler_type::internal_block_type internal_block_type;
typedef typename swappable_block_matrix_type::size_type size_type;
const size_type n, m, l;
smaller_feedable_strassen_winograd_ab p1, p2;
smaller_feedable_strassen_winograd_n p3, p4, p5;
smaller_feedable_strassen_winograd_b p6;
smaller_feedable_strassen_winograd_a p7;
    //! ctor: both A and B already exist as swappable block matrices.
    //! p1..p7 are the seven Strassen-Winograd sub-products on n/2 x m/2 x l/2
    //! partitions; products needing only A, only B, or neither get the
    //! corresponding smaller feedable type.
    feedable_strassen_winograd(
        const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
        const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
        : n(n), m(m), l(l),
          p1(existing_a, a_from_row, a_from_col, bs_c, n/2, m/2, l/2, existing_b, b_from_row, b_from_col),
          p2(existing_a, a_from_row, a_from_col + l/2, bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col),
          p3( bs_c, n/2, m/2, l/2),
          p4( bs_c, n/2, m/2, l/2),
          p5( bs_c, n/2, m/2, l/2),
          p6( bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col + m/2),
          p7(existing_a, a_from_row + n/2, a_from_col + l/2, bs_c, n/2, m/2, l/2) {}
    //! ctor: only A exists; B will be fed element-wise.
    feedable_strassen_winograd(
        const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
        : n(n), m(m), l(l),
          p1(existing_a, a_from_row, a_from_col, bs_c, n/2, m/2, l/2),
          p2(existing_a, a_from_row, a_from_col + l/2, bs_c, n/2, m/2, l/2),
          p3( bs_c, n/2, m/2, l/2),
          p4( bs_c, n/2, m/2, l/2),
          p5( bs_c, n/2, m/2, l/2),
          p6( bs_c, n/2, m/2, l/2),
          p7(existing_a, a_from_row + n/2, a_from_col + l/2, bs_c, n/2, m/2, l/2) {}
    //! ctor: only B exists; A will be fed element-wise.
    feedable_strassen_winograd(
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
        const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
        : n(n), m(m), l(l),
          p1(bs_c, n/2, m/2, l/2, existing_b, b_from_row, b_from_col),
          p2(bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col),
          p3(bs_c, n/2, m/2, l/2),
          p4(bs_c, n/2, m/2, l/2),
          p5(bs_c, n/2, m/2, l/2),
          p6(bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col + m/2),
          p7(bs_c, n/2, m/2, l/2) {}
    //! ctor: neither A nor B exists; both will be fed element-wise.
    feedable_strassen_winograd(
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
        : n(n), m(m), l(l),
          p1(bs_c, n / 2, m / 2, l / 2),
          p2(bs_c, n / 2, m / 2, l / 2),
          p3(bs_c, n / 2, m / 2, l / 2),
          p4(bs_c, n / 2, m / 2, l / 2),
          p5(bs_c, n / 2, m / 2, l / 2),
          p6(bs_c, n / 2, m / 2, l / 2),
          p7(bs_c, n / 2, m / 2, l / 2) { }
    //! Start feeding an A-block: propagate zero-block flags to the seven
    //! sub-products. The Winograd pre-sums s1..s4 are zero only if both of
    //! their operands are zero, hence the & combination of the flags.
    void begin_feeding_a_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        typename zbt::smaller_static_quadtree
            s1 = zb.dl & zb.dr,    // zero-flag of A21+A22
            s2 = s1 & zb.ul,       // zero-flag of (A21+A22)-A11
            s3 = zb.ul & zb.dl,    // zero-flag of A11-A21
            s4 = zb.ur & s2;       // zero-flag of A12-s2
        p1.begin_feeding_a_block(block_row, block_col, zb.ul);
        p2.begin_feeding_a_block(block_row, block_col, zb.ur);
        p3.begin_feeding_a_block(block_row, block_col, s1);
        p4.begin_feeding_a_block(block_row, block_col, s2);
        p5.begin_feeding_a_block(block_row, block_col, s3);
        p6.begin_feeding_a_block(block_row, block_col, s4);
        p7.begin_feeding_a_block(block_row, block_col, zb.dr);
    }
// Forward one element of A to the sub-products, computing the
// Strassen-Winograd pre-additions on the fly:
//   s1 = A.dl + A.dr, s2 = s1 - A.ul, s3 = A.ul - A.dl, s4 = A.ur - s2.
// s2 depends on s1 and s4 on s2, so the initializer order matters.
// p1/p2/p7 receive A's quadrants unchanged.
void feed_a_element(const int_type element_num, const vt v)
{
typename vt::smaller_static_quadtree
s1 = v.dl + v.dr,
s2 = s1 - v.ul,
s3 = v.ul - v.dl,
s4 = v.ur - s2;
p1.feed_a_element(element_num, v.ul);
p2.feed_a_element(element_num, v.ur);
p3.feed_a_element(element_num, s1);
p4.feed_a_element(element_num, s2);
p5.feed_a_element(element_num, s3);
p6.feed_a_element(element_num, s4);
p7.feed_a_element(element_num, v.dr);
}
// Finish feeding a block of A: recompute the same zero-block flags as
// begin_feeding_a_block (& = "zero only if both inputs are zero") and
// pass them down so each sub-product can release its block accordingly.
void end_feeding_a_block(const size_type& block_row, const size_type& block_col, const zbt zb)
{
typename zbt::smaller_static_quadtree
s1 = zb.dl & zb.dr,
s2 = s1 & zb.ul,
s3 = zb.ul & zb.dl,
s4 = zb.ur & s2;
p1.end_feeding_a_block(block_row, block_col, zb.ul);
p2.end_feeding_a_block(block_row, block_col, zb.ur);
p3.end_feeding_a_block(block_row, block_col, s1);
p4.end_feeding_a_block(block_row, block_col, s2);
p5.end_feeding_a_block(block_row, block_col, s3);
p6.end_feeding_a_block(block_row, block_col, s4);
p7.end_feeding_a_block(block_row, block_col, zb.dr);
}
// Announce an incoming block of B, propagating zero-block flags for the
// B pre-additions (t1 = ur-ul, t2 = dr-t1, t3 = dr-ur, t4 = dl-t2; see
// feed_b_element). & marks a result zero only when both inputs are zero.
// t2 depends on t1 and t4 on t2 — initializer order is significant.
void begin_feeding_b_block(const size_type& block_row, const size_type& block_col, const zbt zb)
{
typename zbt::smaller_static_quadtree
t1 = zb.ur & zb.ul,
t2 = zb.dr & t1,
t3 = zb.dr & zb.ur,
t4 = zb.dl & t2;
p1.begin_feeding_b_block(block_row, block_col, zb.ul);
p2.begin_feeding_b_block(block_row, block_col, zb.dl);
p3.begin_feeding_b_block(block_row, block_col, t1);
p4.begin_feeding_b_block(block_row, block_col, t2);
p5.begin_feeding_b_block(block_row, block_col, t3);
p6.begin_feeding_b_block(block_row, block_col, zb.dr);
p7.begin_feeding_b_block(block_row, block_col, t4);
}
// Forward one element of B to the sub-products, computing the
// Strassen-Winograd pre-additions:
//   t1 = B.ur - B.ul, t2 = B.dr - t1, t3 = B.dr - B.ur, t4 = B.dl - t2.
// t2 depends on t1 and t4 on t2, so the initializer order matters.
// p1/p2/p6 receive B's quadrants unchanged.
void feed_b_element(const int_type element_num, const vt v)
{
typename vt::smaller_static_quadtree
t1 = v.ur - v.ul,
t2 = v.dr - t1,
t3 = v.dr - v.ur,
t4 = v.dl - t2;
p1.feed_b_element(element_num, v.ul);
p2.feed_b_element(element_num, v.dl);
p3.feed_b_element(element_num, t1);
p4.feed_b_element(element_num, t2);
p5.feed_b_element(element_num, t3);
p6.feed_b_element(element_num, v.dr);
p7.feed_b_element(element_num, t4);
}
// Finish feeding a block of B: same zero-block flag computation as
// begin_feeding_b_block, passed down so sub-products release correctly.
void end_feeding_b_block(const size_type& block_row, const size_type& block_col, const zbt zb)
{
typename zbt::smaller_static_quadtree
t1 = zb.ur & zb.ul,
t2 = zb.dr & t1,
t3 = zb.dr & zb.ur,
t4 = zb.dl & t2;
p1.end_feeding_b_block(block_row, block_col, zb.ul);
p2.end_feeding_b_block(block_row, block_col, zb.dl);
p3.end_feeding_b_block(block_row, block_col, t1);
p4.end_feeding_b_block(block_row, block_col, t2);
p5.end_feeding_b_block(block_row, block_col, t3);
p6.end_feeding_b_block(block_row, block_col, zb.dr);
p7.end_feeding_b_block(block_row, block_col, t4);
}
// Recursively multiply all seven Strassen-Winograd sub-products.
void multiply()
{
p1.multiply();
p2.multiply();
p3.multiply();
p4.multiply();
p5.multiply();
p6.multiply();
p7.multiply();
}
// Begin reading one result block of C, combining the sub-products'
// zero-block flags. A quadrant of C is a zero block only if every
// sub-block contributing to its post-addition is zero, hence the &
// accumulation. The flag flow mirrors the value flow in read_element;
// note that r.ur doubles as a running intermediate that also feeds r.dl
// and r.dr, so the statement order is significant. Each call also has
// the side effect of acquiring the sub-product's block.
zbt begin_reading_block(const size_type& block_row, const size_type& block_col)
{
zbt r;
r.ur = r.ul = p1.begin_reading_block(block_row, block_col);
r.ul &= p2.begin_reading_block(block_row, block_col);
r.ur &= p4.begin_reading_block(block_row, block_col);
r.dr = r.dl = p5.begin_reading_block(block_row, block_col);
r.dl &= r.ur;
r.dl &= p7.begin_reading_block(block_row, block_col);
r.ur &= p3.begin_reading_block(block_row, block_col);
r.dr &= r.ur;
r.ur &= p6.begin_reading_block(block_row, block_col);
return r;
}
// Read one element of C by applying the Strassen-Winograd post-additions
// to the seven sub-products' elements. r.ur is reused as the running sum
// that also contributes to r.dl and r.dr, so the statement order is
// essential and must not be rearranged.
vt read_element(int_type element_num)
{
vt r;
r.ur = r.ul = p1.read_element(element_num);
r.ul += p2.read_element(element_num);
r.ur += p4.read_element(element_num);
r.dr = r.dl = p5.read_element(element_num);
r.dl += r.ur;
r.dl += p7.read_element(element_num);
r.ur += p3.read_element(element_num);
r.dr += r.ur;
r.ur += p6.read_element(element_num);
return r;
}
// Finish reading a result block: release every sub-product's block and
// recombine the zero-block flags exactly as in begin_reading_block
// (same order, same reuse of r.ur as intermediate).
zbt end_reading_block(const size_type& block_row, const size_type& block_col)
{
zbt r;
r.ur = r.ul = p1.end_reading_block(block_row, block_col);
r.ul &= p2.end_reading_block(block_row, block_col);
r.ur &= p4.end_reading_block(block_row, block_col);
r.dr = r.dl = p5.end_reading_block(block_row, block_col);
r.dl &= r.ur;
r.dl &= p7.end_reading_block(block_row, block_col);
r.ur &= p3.end_reading_block(block_row, block_col);
r.dr &= r.ur;
r.ur &= p6.end_reading_block(block_row, block_col);
return r;
}
};
// Base case (Level == 0) of the feedable Strassen-Winograd recursion:
// instead of splitting further, elements are collected into ordinary
// swappable block matrices a and b, multiplied conventionally via
// choose_level_for_feedable_sw, and the result is read back out of c.
// AExists / BExists indicate that the respective operand already holds
// its data, so the corresponding feed_* calls become no-ops.
template <typename ValueType, unsigned BlockSideLength, bool AExists, bool BExists>
struct feedable_strassen_winograd<ValueType, BlockSideLength, 0, AExists, BExists>
{
typedef static_quadtree<bool, 0> zbt; // true <=> is a zero-block
typedef static_quadtree<ValueType, 0> vt;
typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
typedef typename swappable_block_matrix_type::block_scheduler_type block_scheduler_type;
typedef typename block_scheduler_type::internal_block_type internal_block_type;
typedef typename swappable_block_matrix_type::size_type size_type;
// operands and result matrix; a/b may be views into existing matrices
swappable_block_matrix_type a, b, c;
const size_type n, m, l;
// internal block currently acquired for feeding/reading; 0 otherwise
internal_block_type* iblock;
// Both operands exist: view them in place, allocate only the result c.
// NOTE(review): b is constructed as an (n x l) view here and in the
// constructors below; by the usual A(n x l) * B(l x m) convention one
// would expect (l, m) — equivalent only when n == l == m. TODO confirm.
feedable_strassen_winograd(
const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
: a(existing_a, n, l, a_from_row, a_from_col),
b(existing_b, n, l, b_from_row, b_from_col),
c(bs_c, n, m),
n(n), m(m), l(l),
iblock(0) { }
// Only A exists; b is allocated and filled via feed_b_*.
feedable_strassen_winograd(
const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
: a(existing_a, n, l, a_from_row, a_from_col),
b(bs_c, n, l),
c(bs_c, n, m),
n(n), m(m), l(l),
iblock(0) { }
// Only B exists; a is allocated and filled via feed_a_*.
feedable_strassen_winograd(
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
: a(bs_c, n, l),
b(existing_b, n, l, b_from_row, b_from_col),
c(bs_c, n, m),
n(n), m(m), l(l),
iblock(0) { }
// Neither operand exists; both are allocated and fed.
feedable_strassen_winograd(
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
: a(bs_c, n, l),
b(bs_c, n, l),
c(bs_c, n, m),
n(n), m(m), l(l),
iblock(0) { }
// Acquire the target block of a for writing (no-op if A already exists).
void begin_feeding_a_block(const size_type& block_row, const size_type& block_col, const zbt)
{
if (! AExists)
iblock = &a.bs.acquire(a(block_row, block_col), true);
}
// Store one element into the currently acquired block of a.
void feed_a_element(const int_type element_num, const vt v)
{
if (! AExists)
(*iblock)[element_num] = v;
}
// Release the fed block of a; a zero block (zb) is released uninitialized.
void end_feeding_a_block(const size_type& block_row, const size_type& block_col, const zbt zb)
{
if (! AExists)
{
a.bs.release(a(block_row, block_col), ! zb);
iblock = 0;
}
}
// Acquire the target block of b for writing (no-op if B already exists).
void begin_feeding_b_block(const size_type& block_row, const size_type& block_col, const zbt)
{
if (! BExists)
iblock = &b.bs.acquire(b(block_row, block_col), true);
}
// Store one element into the currently acquired block of b.
void feed_b_element(const int_type element_num, const vt v)
{
if (! BExists)
(*iblock)[element_num] = v;
}
// Release the fed block of b; a zero block (zb) is released uninitialized.
void end_feeding_b_block(const size_type& block_row, const size_type& block_col, const zbt zb)
{
if (! BExists)
{
b.bs.release(b(block_row, block_col), ! zb);
iblock = 0;
}
}
// Perform the actual multiplication c = a * b at a level chosen by
// matrix_operations (recursion bottoms out here).
void multiply()
{ matrix_operations<ValueType, BlockSideLength>::choose_level_for_feedable_sw(a, b, c); }
// Acquire a result block of c for reading; returns whether it is a
// zero block (uninitialized before acquisition).
zbt begin_reading_block(const size_type& block_row, const size_type& block_col)
{
bool zb = ! c.bs.is_initialized(c(block_row, block_col));
iblock = &c.bs.acquire(c(block_row, block_col));
return zb;
}
// Read one element from the currently acquired result block.
vt read_element(const int_type element_num)
{ return (*iblock)[element_num]; }
// Release the result block (unmodified) and report its zero-block state.
zbt end_reading_block(const size_type& block_row, const size_type& block_col)
{
c.bs.release(c(block_row, block_col), false);
iblock = 0;
return ! c.bs.is_initialized(c(block_row, block_col));
}
};
// Adapter presenting a swappable block matrix as a quadtree of depth
// Level: every operation on a (block_row, block_col) position fans out
// to the four quadrant sub-adapters ul, ur, dl, dr, and element /
// zero-block values travel as static_quadtree bundles of the same depth.
// Requires even height and width (in blocks) at every level.
template <typename ValueType, unsigned BlockSideLength, unsigned Level>
struct matrix_to_quadtree
{
typedef static_quadtree<bool, Level> zbt; // true <=> is a zero-block
typedef static_quadtree<ValueType, Level> vt;
typedef matrix_to_quadtree<ValueType, BlockSideLength, Level - 1> smaller_matrix_to_quadtree;
typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
typedef typename swappable_block_matrix_type::block_scheduler_type block_scheduler_type;
typedef typename block_scheduler_type::internal_block_type internal_block_type;
typedef typename swappable_block_matrix_type::size_type size_type;
// the four quadrant views, each one level shallower
smaller_matrix_to_quadtree ul, ur, dl, dr;
// Split the whole matrix into four equal quadrants.
matrix_to_quadtree(const swappable_block_matrix_type & matrix)
: ul(matrix, matrix.get_height()/2, matrix.get_width()/2, 0, 0),
ur(matrix, matrix.get_height()/2, matrix.get_width()/2, 0, matrix.get_width()/2),
dl(matrix, matrix.get_height()/2, matrix.get_width()/2, matrix.get_height()/2, 0),
dr(matrix, matrix.get_height()/2, matrix.get_width()/2, matrix.get_height()/2, matrix.get_width()/2)
{ assert(! (matrix.get_height() % 2 | matrix.get_width() % 2)); }
// Split a submatrix view (height x width at from_row/from_col) into quadrants.
matrix_to_quadtree(const swappable_block_matrix_type & matrix,
const size_type height, const size_type width, const size_type from_row, const size_type from_col)
: ul(matrix, height/2, width/2, from_row, from_col),
ur(matrix, height/2, width/2, from_row, from_col + width/2),
dl(matrix, height/2, width/2, from_row + height/2, from_col),
dr(matrix, height/2, width/2, from_row + height/2, from_col + width/2)
{ assert(! (height % 2 | width % 2)); }
// Begin writing a block: delegate per quadrant with its zero-block flag.
void begin_feeding_block(const size_type& block_row, const size_type& block_col, const zbt zb)
{
ul.begin_feeding_block(block_row, block_col, zb.ul);
ur.begin_feeding_block(block_row, block_col, zb.ur);
dl.begin_feeding_block(block_row, block_col, zb.dl);
dr.begin_feeding_block(block_row, block_col, zb.dr);
}
// Assign one element per quadrant.
void feed_element(const int_type element_num, const vt v)
{
ul.feed_element(element_num, v.ul);
ur.feed_element(element_num, v.ur);
dl.feed_element(element_num, v.dl);
dr.feed_element(element_num, v.dr);
}
// Accumulate (+=) one element per quadrant.
void feed_and_add_element(const int_type element_num, const vt v)
{
ul.feed_and_add_element(element_num, v.ul);
ur.feed_and_add_element(element_num, v.ur);
dl.feed_and_add_element(element_num, v.dl);
dr.feed_and_add_element(element_num, v.dr);
}
// Finish writing a block: release per quadrant with its zero-block flag.
void end_feeding_block(const size_type& block_row, const size_type& block_col, const zbt zb)
{
ul.end_feeding_block(block_row, block_col, zb.ul);
ur.end_feeding_block(block_row, block_col, zb.ur);
dl.end_feeding_block(block_row, block_col, zb.dl);
dr.end_feeding_block(block_row, block_col, zb.dr);
}
// Begin reading a block; gathers the quadrants' zero-block flags.
zbt begin_reading_block(const size_type& block_row, const size_type& block_col)
{
zbt zb;
zb.ul = ul.begin_reading_block(block_row, block_col);
zb.ur = ur.begin_reading_block(block_row, block_col);
zb.dl = dl.begin_reading_block(block_row, block_col);
zb.dr = dr.begin_reading_block(block_row, block_col);
return zb;
}
// Read one element, bundled from all four quadrants.
vt read_element(const int_type element_num)
{
vt v;
v.ul = ul.read_element(element_num);
v.ur = ur.read_element(element_num);
v.dl = dl.read_element(element_num);
v.dr = dr.read_element(element_num);
return v;
}
// Finish reading a block; gathers the quadrants' zero-block flags.
zbt end_reading_block(const size_type& block_row, const size_type& block_col)
{
zbt zb;
zb.ul = ul.end_reading_block(block_row, block_col);
zb.ur = ur.end_reading_block(block_row, block_col);
zb.dl = dl.end_reading_block(block_row, block_col);
zb.dr = dr.end_reading_block(block_row, block_col);
return zb;
}
// Dimensions (in blocks) of one quadrant; all quadrants are equal-sized.
const size_type & get_height_in_blocks()
{ return ul.get_height_in_blocks(); }
const size_type & get_width_in_blocks()
{ return ul.get_width_in_blocks(); }
};
// Leaf (Level == 0) of the quadtree adapter: operates directly on a view
// m of the underlying swappable block matrix, acquiring and releasing
// one internal block at a time through the block scheduler.
template <typename ValueType, unsigned BlockSideLength>
struct matrix_to_quadtree<ValueType, BlockSideLength, 0>
{
typedef static_quadtree<bool, 0> zbt; // true <=> is a zero-block
typedef static_quadtree<ValueType, 0> vt;
typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
typedef typename swappable_block_matrix_type::block_scheduler_type block_scheduler_type;
typedef typename block_scheduler_type::internal_block_type internal_block_type;
typedef typename swappable_block_matrix_type::size_type size_type;
// the (sub)matrix this leaf covers
swappable_block_matrix_type m;
// internal block currently acquired for feeding/reading; 0 otherwise
internal_block_type* iblock;
matrix_to_quadtree(const swappable_block_matrix_type& matrix)
: m(matrix, matrix.get_height(), matrix.get_width(), 0, 0),
iblock(0) { }
matrix_to_quadtree(const swappable_block_matrix_type& matrix,
const size_type height, const size_type width, const size_type from_row, const size_type from_col)
: m(matrix, height, width, from_row, from_col),
iblock(0) { }
// Acquire the block for writing; the zero-block flag is irrelevant here.
void begin_feeding_block(const size_type& block_row, const size_type& block_col, const zbt)
{ iblock = &m.bs.acquire(m(block_row, block_col)); }
// Assign one element of the acquired block.
void feed_element(const int_type element_num, const vt v)
{ (*iblock)[element_num] = v; }
// Accumulate one element of the acquired block.
void feed_and_add_element(const int_type element_num, const vt v)
{ (*iblock)[element_num] += v; }
// Release the block; a zero block (zb) is released uninitialized.
void end_feeding_block(const size_type& block_row, const size_type& block_col, const zbt zb)
{
m.bs.release(m(block_row, block_col), ! zb);
iblock = 0;
}
// Acquire the block for reading; returns whether it was a zero block.
zbt begin_reading_block(const size_type& block_row, const size_type& block_col)
{
zbt zb = ! m.bs.is_initialized(m(block_row, block_col));
iblock = &m.bs.acquire(m(block_row, block_col));
return zb;
}
// Read one element from the acquired block.
vt read_element(const int_type element_num)
{ return (*iblock)[element_num]; }
// Release the block unmodified and report its zero-block state.
zbt end_reading_block(const size_type& block_row, const size_type& block_col)
{
m.bs.release(m(block_row, block_col), false);
iblock = 0;
return ! m.bs.is_initialized(m(block_row, block_col));
}
// At the leaf, quadtree granularity equals block granularity.
const size_type & get_height_in_blocks()
{ return m.get_height(); }
const size_type & get_width_in_blocks()
{ return m.get_width(); }
};
template <typename ValueType, unsigned BlockSideLength, unsigned Level, bool AExists, bool BExists>
struct feedable_strassen_winograd_block_grained
{
typedef static_quadtree<bool, Level> zbt; // true <=> is a zero-block
typedef static_quadtree<ValueType, Level> vt;
typedef feedable_strassen_winograd_block_grained<ValueType, BlockSideLength, Level - 1, AExists, BExists> smaller_feedable_strassen_winograd_ab;
typedef feedable_strassen_winograd_block_grained<ValueType, BlockSideLength, Level - 1, AExists, false> smaller_feedable_strassen_winograd_a;
typedef feedable_strassen_winograd_block_grained<ValueType, BlockSideLength, Level - 1, false, BExists> smaller_feedable_strassen_winograd_b;
typedef feedable_strassen_winograd_block_grained<ValueType, BlockSideLength, Level - 1, false, false> smaller_feedable_strassen_winograd_n;
typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
typedef typename swappable_block_matrix_type::block_scheduler_type block_scheduler_type;
typedef typename block_scheduler_type::internal_block_type internal_block_type;
typedef typename swappable_block_matrix_type::size_type size_type;
typedef matrix_operations<ValueType, BlockSideLength> Ops;
const size_type n, m, l;
smaller_feedable_strassen_winograd_ab p1, p2;
smaller_feedable_strassen_winograd_n p3, p4, p5;
smaller_feedable_strassen_winograd_b p6;
smaller_feedable_strassen_winograd_a p7;
inline feedable_strassen_winograd_block_grained(
const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
: n(n), m(m), l(l),
p1(existing_a, a_from_row, a_from_col, bs_c, n/2, m/2, l/2, existing_b, b_from_row, b_from_col),
p2(existing_a, a_from_row, a_from_col + l/2, bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col),
p3( bs_c, n/2, m/2, l/2),
p4( bs_c, n/2, m/2, l/2),
p5( bs_c, n/2, m/2, l/2),
p6( bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col + m/2),
p7(existing_a, a_from_row + n/2, a_from_col + l/2, bs_c, n/2, m/2, l/2) {}
inline feedable_strassen_winograd_block_grained(
const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
: n(n), m(m), l(l),
p1(existing_a, a_from_row, a_from_col, bs_c, n/2, m/2, l/2),
p2(existing_a, a_from_row, a_from_col + l/2, bs_c, n/2, m/2, l/2),
p3( bs_c, n/2, m/2, l/2),
p4( bs_c, n/2, m/2, l/2),
p5( bs_c, n/2, m/2, l/2),
p6( bs_c, n/2, m/2, l/2),
p7(existing_a, a_from_row + n/2, a_from_col + l/2, bs_c, n/2, m/2, l/2) {}
inline feedable_strassen_winograd_block_grained(
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
: n(n), m(m), l(l),
p1(bs_c, n/2, m/2, l/2, existing_b, b_from_row, b_from_col),
p2(bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col),
p3(bs_c, n/2, m/2, l/2),
p4(bs_c, n/2, m/2, l/2),
p5(bs_c, n/2, m/2, l/2),
p6(bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col + m/2),
p7(bs_c, n/2, m/2, l/2) {}
inline feedable_strassen_winograd_block_grained(
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
: n(n), m(m), l(l),
p1(bs_c, n / 2, m / 2, l / 2),
p2(bs_c, n / 2, m / 2, l / 2),
p3(bs_c, n / 2, m / 2, l / 2),
p4(bs_c, n / 2, m / 2, l / 2),
p5(bs_c, n / 2, m / 2, l / 2),
p6(bs_c, n / 2, m / 2, l / 2),
p7(bs_c, n / 2, m / 2, l / 2) { }
inline void feed_a(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl)
{
// partition bl
typename Ops::swappable_block_matrix_quarterer qbl(bl);
// preadditions
swappable_block_matrix_type
s1(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()),
s2(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()),
s3(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()),
s4(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed());
Ops::strassen_winograd_preaddition_a(qbl.ul, qbl.ur, qbl.dl, qbl.dr, s1, s2, s3, s4);
// feed recursive
p1.feed_a(row, col, qbl.ul);
p2.feed_a(row, col, qbl.ur);
p3.feed_a(row, col, s1);
p4.feed_a(row, col, s2);
p5.feed_a(row, col, s3);
p6.feed_a(row, col, s4);
p7.feed_a(row, col, qbl.dr);
}
inline void feed_b(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl)
{
// partition bl
typename Ops::swappable_block_matrix_quarterer qbl(bl);
// preadditions
swappable_block_matrix_type
t1(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()),
t2(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()),
t3(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()),
t4(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed());
Ops::strassen_winograd_preaddition_b(qbl.ul, qbl.ur, qbl.dl, qbl.dr, t1, t2, t3, t4);
// feed recursive
p1.feed_b(row, col, qbl.ul);
p2.feed_b(row, col, qbl.dl);
p3.feed_b(row, col, t1);
p4.feed_b(row, col, t2);
p5.feed_b(row, col, t3);
p6.feed_b(row, col, qbl.dr);
p7.feed_b(row, col, t4);
}
inline void multiply()
{
p1.multiply();
p2.multiply();
p3.multiply();
p4.multiply();
p5.multiply();
p6.multiply();
p7.multiply();
}
inline void read_and_add(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl)
{
// partition bl
typename Ops::swappable_block_matrix_quarterer qbl(bl);
// postadditions
swappable_block_matrix_type px(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed());
p2.read_and_add(row, col, qbl.ul);
p1.read_and_add(row, col, px);
Ops::element_op(qbl.ul, px, typename Ops::addition());
p4.read_and_add(row, col, px);
Ops::element_op(qbl.ur, px, typename Ops::addition());
p5.read_and_add(row, col, px);
Ops::element_op_twice_nontransposed(qbl.dl, qbl.dr, px, typename Ops::addition());
px.set_zero();
p7.read_and_add(row, col, qbl.dl);
p3.read_and_add(row, col, px);
Ops::element_op_twice_nontransposed(qbl.dr, qbl.ur, px, typename Ops::addition());
p6.read_and_add(row, col, qbl.ur);
}
inline static unsigned_type get_num_temp_grains()
{ return smaller_feedable_strassen_winograd_ab::get_num_temp_grains() + (4 ^ Level) * 2; }
};
// Base case (Level == 0) of the block-grained Strassen-Winograd feeder:
// fed sub-matrices are accumulated into ordinary operands a and b, the
// product is computed by the block-grained multi-level routine, and
// results are added back out of c.
template <typename ValueType, unsigned BlockSideLength, bool AExists, bool BExists>
struct feedable_strassen_winograd_block_grained<ValueType, BlockSideLength, 0, AExists, BExists>
{
typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
typedef typename swappable_block_matrix_type::block_scheduler_type block_scheduler_type;
typedef typename swappable_block_matrix_type::swappable_block_identifier_type swappable_block_identifier_type;
typedef typename swappable_block_matrix_type::size_type size_type;
typedef matrix_operations<ValueType, BlockSideLength> Ops;
typedef static_quadtree<swappable_block_identifier_type, 0> bt;
// operands and result; a/b may be views into existing matrices
swappable_block_matrix_type a, b, c;
// Both operands exist: view them in place; allocate only the result c.
inline feedable_strassen_winograd_block_grained(
const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
: a(existing_a, n, l, a_from_row, a_from_col),
b(existing_b, n, l, b_from_row, b_from_col),
c(bs_c, n, m) { }
// Only A exists; b is allocated and filled via feed_b.
inline feedable_strassen_winograd_block_grained(
const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
: a(existing_a, n, l, a_from_row, a_from_col),
b(bs_c, n, l),
c(bs_c, n, m) { }
// Only B exists; a is allocated and filled via feed_a.
inline feedable_strassen_winograd_block_grained(
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
: a(bs_c, n, l),
b(existing_b, n, l, b_from_row, b_from_col),
c(bs_c, n, m) { }
// Neither operand exists; both are allocated and fed.
inline feedable_strassen_winograd_block_grained(
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
: a(bs_c, n, l),
b(bs_c, n, l),
c(bs_c, n, m) { }
// Add bl into the view of a at (row, col); no-op if A already exists.
inline void feed_a(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl)
{
if (! AExists)
{
// copy bl to a from (row, col) (assuming a from (row, col) == 0)
swappable_block_matrix_type at(a, bl.get_height(), bl.get_width(), row, col);
Ops::element_op(at, bl, typename Ops::addition());
}
}
// Add bl into the view of b at (row, col); no-op if B already exists.
// NOTE(review): the local variable 'bt' shadows the 'bt' typedef above;
// legal but confusing — consider renaming if this code is touched again.
inline void feed_b(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl)
{
if (! BExists)
{
// copy bl(0,0) to b(row, col) (assuming b from (row, col) == 0)
swappable_block_matrix_type bt(b, bl.get_height(), bl.get_width(), row, col);
Ops::element_op(bt, bl, typename Ops::addition());
}
}
// Multiply a * b into c; temporary operands are zeroed afterwards so
// the feeder can be reused for the next round of feeding.
inline void multiply()
{
matrix_operations<ValueType, BlockSideLength>::
multi_level_strassen_winograd_multiply_and_add_block_grained(a, b, c);
if (! AExists)
a.set_zero();
if (! BExists)
b.set_zero();
}
// Add the result view of c at (row, col) into bl, then zero that view.
inline void read_and_add(const size_type& row, const size_type& col, swappable_block_matrix_type& bl)
{
// add c from (row, col) to bl
swappable_block_matrix_type ct(c, bl.get_height(), bl.get_width(), row, col);
Ops::element_op(bl, ct, typename Ops::addition());
ct.set_zero();
}
// No temporaries beyond the operands themselves at the base level.
inline static unsigned_type get_num_temp_grains()
{ return 0; }
};
// Block-grained quadtree adapter: splits a swappable block matrix into
// four quadrant sub-adapters per level; operator() assembles the four
// corresponding Granularity-sized tiles into one combined matrix view.
// Requires even height and width at every level.
template <typename ValueType, unsigned BlockSideLength, unsigned Level, unsigned Granularity>
struct matrix_to_quadtree_block_grained
{
typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
typedef typename swappable_block_matrix_type::size_type size_type;
typedef matrix_to_quadtree_block_grained<ValueType, BlockSideLength, Level - 1, Granularity> smaller_matrix_to_quadtree_block_grained;
// the four quadrant views, each one level shallower
smaller_matrix_to_quadtree_block_grained ul, ur, dl, dr;
// Split the whole matrix into four equal quadrants.
inline matrix_to_quadtree_block_grained(const swappable_block_matrix_type & matrix)
: ul(matrix, matrix.get_height()/2, matrix.get_width()/2, 0, 0),
ur(matrix, matrix.get_height()/2, matrix.get_width()/2, 0, matrix.get_width()/2),
dl(matrix, matrix.get_height()/2, matrix.get_width()/2, matrix.get_height()/2, 0),
dr(matrix, matrix.get_height()/2, matrix.get_width()/2, matrix.get_height()/2, matrix.get_width()/2)
{ assert(! (matrix.get_height() % 2 | matrix.get_width() % 2)); }
// Split a submatrix view (height x width at from_row/from_col) into quadrants.
inline matrix_to_quadtree_block_grained(const swappable_block_matrix_type & matrix,
const size_type height, const size_type width, const size_type from_row, const size_type from_col)
: ul(matrix, height/2, width/2, from_row, from_col),
ur(matrix, height/2, width/2, from_row, from_col + width/2),
dl(matrix, height/2, width/2, from_row + height/2, from_col),
dr(matrix, height/2, width/2, from_row + height/2, from_col + width/2)
{ assert(! (height % 2 | width % 2)); }
// Combine the four quadrants' tiles at (row, col) into one matrix.
inline swappable_block_matrix_type operator () (const size_type& row, const size_type& col)
{
return swappable_block_matrix_type(ul(row, col), ur(row, col), dl(row, col), dr(row, col));
}
// Dimensions in grains, taken from one (representative) quadrant.
inline const size_type get_height()
{ return ul.get_height(); }
inline const size_type get_width()
{ return ul.get_width(); }
};
// Leaf (Level == 0) of the block-grained quadtree adapter: maps grain
// coordinates to Granularity x Granularity tiles of the underlying
// (sub)matrix view m.
template <typename ValueType, unsigned BlockSideLength, unsigned Granularity>
struct matrix_to_quadtree_block_grained<ValueType, BlockSideLength, 0, Granularity>
{
typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
typedef typename swappable_block_matrix_type::size_type size_type;
// the (sub)matrix this leaf covers; its dims must be multiples of Granularity
swappable_block_matrix_type m;
inline matrix_to_quadtree_block_grained(const swappable_block_matrix_type& matrix)
: m(matrix, matrix.get_height(), matrix.get_width(), 0, 0)
{ assert(! (matrix.get_height() % Granularity | matrix.get_width() % Granularity)); }
inline matrix_to_quadtree_block_grained(const swappable_block_matrix_type& matrix,
const size_type height, const size_type width, const size_type from_row, const size_type from_col)
: m(matrix, height, width, from_row, from_col)
// BUG FIX: assert the *submatrix* dimensions (height, width) — i.e. the
// dimensions of the view m actually tiled below — are divisible by
// Granularity. The original checked matrix.get_height()/get_width(),
// the full matrix, which is wrong whenever the view differs from the
// whole matrix (cf. the Level > 0 variant, which checks height/width).
{ assert(! (height % Granularity | width % Granularity)); }
// Return the Granularity x Granularity tile at grain position (row, col).
inline swappable_block_matrix_type operator () (const size_type& row, const size_type& col)
{
return swappable_block_matrix_type(m, Granularity, Granularity, row * Granularity, col * Granularity);
}
// Dimensions in grains (tiles of side Granularity).
inline const size_type get_height()
{ return m.get_height() / Granularity; }
inline const size_type get_width()
{ return m.get_width() / Granularity; }
};
template <typename ValueType, unsigned BlockSideLength>
struct matrix_operations
{
// tuning-parameter: Only matrices larger than this (in blocks) are processed by Strassen-Winograd.
// you have to adapt choose_level_for_feedable_sw, too
static const int_type strassen_winograd_base_case_size;
typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type;
typedef typename swappable_block_matrix_type::block_scheduler_type block_scheduler_type;
typedef typename swappable_block_matrix_type::swappable_block_identifier_type swappable_block_identifier_type;
typedef typename block_scheduler_type::internal_block_type internal_block_type;
typedef typename swappable_block_matrix_type::size_type size_type;
typedef column_vector<ValueType> column_vector_type;
typedef row_vector<ValueType> row_vector_type;
typedef typename column_vector_type::size_type vector_size_type;
// +-+-+-+ addition +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// Element-wise addition functor used by element_op and the low-level
// block kernels. All three arities below follow the Op contract.
struct addition
{
/* op(c,a,b) means c = a <op> b e.g. assign sum
* op(c,a) means c <op>= a e.g. add up
* op(a) means <op>a e.g. sign
*
* it should hold:
* op(c,0,0) equivalent c = 0
* op(c=0,a) equivalent c = op(a)
* op(c,0) equivalent {}
*/
inline ValueType& operator () (ValueType& c, const ValueType& a, const ValueType& b) { return c = a + b; }
inline ValueType& operator () (ValueType& c, const ValueType& a) { return c += a; }
inline ValueType operator () (const ValueType& a) { return +a; }
};
// Element-wise subtraction functor; same three-arity Op contract as
// addition above (c = a - b, c -= a, unary negation).
struct subtraction
{
inline ValueType& operator () (ValueType& c, const ValueType& a, const ValueType& b) { return c = a - b; }
inline ValueType& operator () (ValueType& c, const ValueType& a) { return c -= a; }
inline ValueType operator () (const ValueType& a) { return -a; }
};
// Multiply-by-scalar functor: c = a * s / returns a * s. The scalar s is
// fixed at construction (defaults to 1, the multiplicative identity);
// the conversion operator exposes it for callers that need the factor.
struct scalar_multiplication
{
inline scalar_multiplication(const ValueType scalar = 1) : s(scalar) { }
inline ValueType& operator () (ValueType& c, const ValueType& a) { return c = a * s; }
inline ValueType operator () (const ValueType& a) { return a * s; }
inline operator const ValueType& () { return s; }
const ValueType s;
};
// element_op<Op>(C,A,B) calculates C = A <Op> B
// Applies Op block-wise over all blocks of C (A, B and C are assumed to
// have matching dimensions); honors each operand's transposition flag.
template <class Op>
static swappable_block_matrix_type&
element_op(swappable_block_matrix_type& C,
const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B, Op op = Op())
{
for (size_type row = 0; row < C.get_height(); ++row)
for (size_type col = 0; col < C.get_width(); ++col)
element_op_swappable_block(
C(row, col), C.is_transposed(), C.bs,
A(row, col), A.is_transposed(), A.bs,
B(row, col), B.is_transposed(), B.bs, op);
return C;
}
// element_op<Op>(C,A) calculates C <Op>= A
// In-place block-wise application of Op with operand A onto C.
template <class Op>
static swappable_block_matrix_type&
element_op(swappable_block_matrix_type& C,
const swappable_block_matrix_type& A, Op op = Op())
{
for (size_type row = 0; row < C.get_height(); ++row)
for (size_type col = 0; col < C.get_width(); ++col)
element_op_swappable_block(
C(row, col), C.is_transposed(), C.bs,
A(row, col), A.is_transposed(), A.bs, op);
return C;
}
// element_op<Op>(C) calculates C = <Op>C
// Unary in-place block-wise application of Op to every block of C.
template <class Op>
static swappable_block_matrix_type&
element_op(swappable_block_matrix_type& C, Op op = Op())
{
for (size_type row = 0; row < C.get_height(); ++row)
for (size_type col = 0; col < C.get_width(); ++col)
element_op_swappable_block(
C(row, col), C.bs, op);
return C;
}
// calculates c = a <Op> b
// Single-block binary operation. Uninitialized blocks are treated as
// zero blocks: if both inputs are zero, c is deinitialized (kept zero);
// if exactly one is zero, the other is copied/transformed into c. The
// transposition flags are folded relative to c's orientation before
// dispatching to the statically-specialized low-level kernels. The
// acquire/release ordering on the block scheduler is deliberate and
// must not be changed.
template <class Op>
static void
element_op_swappable_block(
const swappable_block_identifier_type c, const bool c_is_transposed, block_scheduler_type& bs_c,
const swappable_block_identifier_type a, bool a_is_transposed, block_scheduler_type& bs_a,
const swappable_block_identifier_type b, bool b_is_transposed, block_scheduler_type& bs_b, Op op = Op())
{
if (! bs_c.is_simulating())
++matrix_operation_statistic::get_instance()->block_addition_calls;
// check if zero-block (== ! initialized)
if (! bs_a.is_initialized(a) && ! bs_b.is_initialized(b))
{
// => a and b are zero -> set c zero
bs_c.deinitialize(c);
if (! bs_c.is_simulating())
++matrix_operation_statistic::get_instance()->block_additions_saved_through_zero;
return;
}
// express a's and b's orientation relative to c's
a_is_transposed = a_is_transposed != c_is_transposed;
b_is_transposed = b_is_transposed != c_is_transposed;
if (! bs_a.is_initialized(a))
{
// a is zero -> copy b
internal_block_type& ic = bs_c.acquire(c, true),
& ib = bs_b.acquire(b);
if (! bs_c.is_simulating())
{
if (b_is_transposed)
low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, true, Op>(&ic[0], 0, &ib[0], op);
else
low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, false, Op>(&ic[0], 0, &ib[0], op);
}
bs_b.release(b, false);
bs_c.release(c, true);
}
else if (! bs_b.is_initialized(b))
{
// b is zero -> copy a
internal_block_type& ic = bs_c.acquire(c, true),
& ia = bs_a.acquire(a);
if (! bs_c.is_simulating())
{
if (a_is_transposed)
low_level_matrix_binary_ass_op<ValueType, BlockSideLength, true, false, Op>(&ic[0], &ia[0], 0, op);
else
low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, false, Op>(&ic[0], &ia[0], 0, op);
}
bs_a.release(a, false);
bs_c.release(c, true);
}
else
{
// general case: both operands initialized
internal_block_type& ic = bs_c.acquire(c, true),
& ia = bs_a.acquire(a),
& ib = bs_b.acquire(b);
if (! bs_c.is_simulating())
{
if (a_is_transposed)
{
if (b_is_transposed)
low_level_matrix_binary_ass_op<ValueType, BlockSideLength, true, true, Op>(&ic[0], &ia[0], &ib[0], op);
else
low_level_matrix_binary_ass_op<ValueType, BlockSideLength, true, false, Op>(&ic[0], &ia[0], &ib[0], op);
}
else
{
if (b_is_transposed)
low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, true, Op>(&ic[0], &ia[0], &ib[0], op);
else
low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, false, Op>(&ic[0], &ia[0], &ib[0], op);
}
}
bs_a.release(a, false);
bs_b.release(b, false);
bs_c.release(c, true);
}
}
// calculates c <op>= a
// Single-block compound assignment. An uninitialized a is a zero block
// and leaves c untouched. If c itself is zero, the non-assigning kernel
// (plain op) is used so c is freshly written; otherwise the assigning
// (accumulate) kernel applies. The kernel's transposition parameter is
// the relative orientation of a with respect to c.
template <class Op>
static void
element_op_swappable_block(
const swappable_block_identifier_type c, const bool c_is_transposed, block_scheduler_type& bs_c,
const swappable_block_identifier_type a, const bool a_is_transposed, block_scheduler_type& bs_a, Op op = Op())
{
if (! bs_c.is_simulating())
++matrix_operation_statistic::get_instance()->block_addition_calls;
// check if zero-block (== ! initialized)
if (! bs_a.is_initialized(a))
{
// => a is zero => nothing to do
if (! bs_c.is_simulating())
++matrix_operation_statistic::get_instance()->block_additions_saved_through_zero;
return;
}
const bool c_is_zero = ! bs_c.is_initialized(c);
// acquire
internal_block_type& ic = bs_c.acquire(c, c_is_zero),
& ia = bs_a.acquire(a);
// add
if (! bs_c.is_simulating())
{
if (c_is_zero) {
if (c_is_transposed == a_is_transposed)
low_level_matrix_unary_op<ValueType, BlockSideLength, false, Op>(&ic[0], &ia[0], op);
else
low_level_matrix_unary_op<ValueType, BlockSideLength, true, Op>(&ic[0], &ia[0], op);
}
else {
if (c_is_transposed == a_is_transposed)
low_level_matrix_unary_ass_op<ValueType, BlockSideLength, false, Op>(&ic[0], &ia[0], op);
else
low_level_matrix_unary_ass_op<ValueType, BlockSideLength, true, Op>(&ic[0], &ia[0], op);
}
}
// release
bs_c.release(c, true);
bs_a.release(a, false);
}
// calculates c = <op>c
// Applies the unary operator Op to every element of block c, in place.
// An uninitialized (zero) block is left untouched.
// NOTE(review): skipping the zero-block assumes op(0) == 0 — confirm for all Op used.
template <class Op>
static void
element_op_swappable_block(
const swappable_block_identifier_type c, block_scheduler_type& bs_c, Op op = Op())
{
if (! bs_c.is_simulating())
++matrix_operation_statistic::get_instance()->block_addition_calls;
// check if zero-block (== ! initialized)
if (! bs_c.is_initialized(c))
{
// => c is zero => nothing to do
if (! bs_c.is_simulating())
++matrix_operation_statistic::get_instance()->block_additions_saved_through_zero;
return;
}
// acquire
internal_block_type& ic = bs_c.acquire(c);
// apply op element-wise, reading and writing the same block
if (! bs_c.is_simulating())
low_level_matrix_unary_op<ValueType, BlockSideLength, false, Op>(&ic[0], &ic[0], op);
// release
bs_c.release(c, true);
}
// additions for strassen-winograd
// Pre-additions on the quartered A operand, computed blockwise:
//   s3 = a11 - a21,  s1 = a21 + a22,  s2 = s1 - a11,  s4 = a12 - s2
// All four temporaries for one (row, col) block are produced together
// before moving to the next block.
inline static void
strassen_winograd_preaddition_a(swappable_block_matrix_type& a11,
swappable_block_matrix_type& a12,
swappable_block_matrix_type& a21,
swappable_block_matrix_type& a22,
swappable_block_matrix_type& s1,
swappable_block_matrix_type& s2,
swappable_block_matrix_type& s3,
swappable_block_matrix_type& s4)
{
for (size_type row = 0; row < a11.get_height(); ++row)
for (size_type col = 0; col < a11.get_width(); ++col)
{
op_swappable_block_nontransposed(s3, a11, subtraction(), a21, row, col);
op_swappable_block_nontransposed(s1, a21, addition(), a22, row, col);
op_swappable_block_nontransposed(s2, s1, subtraction(), a11, row, col);
op_swappable_block_nontransposed(s4, a12, subtraction(), s2, row, col);
}
}
// Pre-additions on the quartered B operand, computed blockwise:
//   t3 = b22 - b12,  t1 = b12 - b11,  t2 = b22 - t1,  t4 = b21 - t2
inline static void
strassen_winograd_preaddition_b(swappable_block_matrix_type& b11,
swappable_block_matrix_type& b12,
swappable_block_matrix_type& b21,
swappable_block_matrix_type& b22,
swappable_block_matrix_type& t1,
swappable_block_matrix_type& t2,
swappable_block_matrix_type& t3,
swappable_block_matrix_type& t4)
{
for (size_type row = 0; row < b11.get_height(); ++row)
for (size_type col = 0; col < b11.get_width(); ++col)
{
op_swappable_block_nontransposed(t3, b22, subtraction(), b12, row, col);
op_swappable_block_nontransposed(t1, b12, subtraction(), b11, row, col);
op_swappable_block_nontransposed(t2, b22, subtraction(), t1, row, col);
op_swappable_block_nontransposed(t4, b21, subtraction(), t2, row, col);
}
}
// Winograd post-addition schedule (steps u1..u7), performed blockwise.
// On entry the c-quarters hold the products noted in the parameter
// comments (c11 = p2, c12 = p6, c21 = p7, c22 = p4); on exit they hold
// the final quarters of C. p1, p3, p5 are consumed as scratch.
inline static void
strassen_winograd_postaddition(swappable_block_matrix_type& c11, // = p2
swappable_block_matrix_type& c12, // = p6
swappable_block_matrix_type& c21, // = p7
swappable_block_matrix_type& c22, // = p4
swappable_block_matrix_type& p1,
swappable_block_matrix_type& p3,
swappable_block_matrix_type& p5)
{
for (size_type row = 0; row < c11.get_height(); ++row)
for (size_type col = 0; col < c11.get_width(); ++col)
{
op_swappable_block_nontransposed(c11, addition(), p1, row, col); // (u1)
op_swappable_block_nontransposed( p1, addition(), c22, row, col); // (u2)
op_swappable_block_nontransposed( p5, addition(), p1, row, col); // (u3)
op_swappable_block_nontransposed(c21, addition(), p5, row, col); // (u4)
op_swappable_block_nontransposed(c22, p5, addition(), p3, row, col); // (u5)
op_swappable_block_nontransposed( p1, addition(), p3, row, col); // (u6)
op_swappable_block_nontransposed(c12, addition(), p1, row, col); // (u7)
}
}
// calculates c1 += a; c2 += a
// (more precisely c1 <op>= a and c2 <op>= a for a generic Op; all matrices
// are treated as non-transposed, block by block)
template <class Op>
inline static void
element_op_twice_nontransposed(swappable_block_matrix_type& c1,
swappable_block_matrix_type& c2,
const swappable_block_matrix_type& a, Op op = Op())
{
for (size_type row = 0; row < a.get_height(); ++row)
for (size_type col = 0; col < a.get_width(); ++col)
{
element_op_swappable_block(
c1(row, col), false, c1.bs,
a(row, col), false, a.bs, op);
element_op_swappable_block(
c2(row, col), false, c2.bs,
a(row, col), false, a.bs, op);
}
}
// c(row, col) = a(row, col) <op> b(row, col); every operand is taken
// as a non-transposed block.
template <class Op>
inline static void
op_swappable_block_nontransposed(swappable_block_matrix_type& c,
swappable_block_matrix_type& a, Op op, swappable_block_matrix_type& b,
size_type& row, size_type& col)
{
const bool not_transposed = false;
element_op_swappable_block(
c(row, col), not_transposed, c.bs,
a(row, col), not_transposed, a.bs,
b(row, col), not_transposed, b.bs, op);
}
// c(row, col) <op>= a(row, col); both operands are taken as
// non-transposed blocks.
template <class Op>
inline static void
op_swappable_block_nontransposed(swappable_block_matrix_type& c, Op op, swappable_block_matrix_type& a,
size_type& row, size_type& col)
{
const bool not_transposed = false;
element_op_swappable_block(
c(row, col), not_transposed, c.bs,
a(row, col), not_transposed, a.bs, op);
}
// +-+ end addition +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// +-+-+-+ matrix multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/* n, m and l denote the three dimensions of a matrix multiplication, according to the following ascii-art diagram:
*
* +--m--+
* +----l-----+ | | +--m--+
* | | | | | |
* n A | • l B | = n C |
* | | | | | |
* +----------+ | | +-----+
* +-----+
*
* The index-variables are called i, j, k for dimension
* n, m, l .
*/
// requires height and width divisible by 2
// Partitions a matrix into four equally sized quarter-views (no copy);
// ul/ur/dl/dr are reference aliases for the quarters.
struct swappable_block_matrix_quarterer
{
swappable_block_matrix_type upleft, upright,
downleft, downright,
& ul, & ur, & dl, & dr;
swappable_block_matrix_quarterer(const swappable_block_matrix_type & whole)
: upleft (whole, whole.get_height()/2, whole.get_width()/2, 0, 0),
upright (whole, whole.get_height()/2, whole.get_width()/2, 0, whole.get_width()/2),
downleft (whole, whole.get_height()/2, whole.get_width()/2, whole.get_height()/2, 0),
downright(whole, whole.get_height()/2, whole.get_width()/2, whole.get_height()/2, whole.get_width()/2),
ul(upleft), ur(upright), dl(downleft), dr(downright)
{ assert(! (whole.get_height() % 2 | whole.get_width() % 2)); } // both dimensions must be even
};
// Quarters a matrix into four views of equal, rounded-up size.
// For odd dimensions the views extend past the source matrix —
// presumably the view pads with zero-blocks; TODO(review) confirm.
struct swappable_block_matrix_padding_quarterer
{
swappable_block_matrix_type upleft, upright,
downleft, downright,
& ul, & ur, & dl, & dr;
swappable_block_matrix_padding_quarterer(const swappable_block_matrix_type & whole)
: upleft (whole, div_ceil(whole.get_height(),2), div_ceil(whole.get_width(),2), 0, 0),
upright (whole, div_ceil(whole.get_height(),2), div_ceil(whole.get_width(),2), 0, div_ceil(whole.get_width(),2)),
downleft (whole, div_ceil(whole.get_height(),2), div_ceil(whole.get_width(),2), div_ceil(whole.get_height(),2), 0),
downright(whole, div_ceil(whole.get_height(),2), div_ceil(whole.get_width(),2), div_ceil(whole.get_height(),2), div_ceil(whole.get_width(),2)),
ul(upleft), ur(upright), dl(downleft), dr(downright) {}
};
// Quarters a matrix into four views that exactly cover it: the upper-left
// quarter gets the floor of each half-dimension, the right/lower views
// absorb the remainders. Works for any (non-zero) dimensions, no padding.
struct swappable_block_matrix_approximative_quarterer
{
swappable_block_matrix_type upleft, upright,
downleft, downright,
& ul, & ur, & dl, & dr;
swappable_block_matrix_approximative_quarterer(const swappable_block_matrix_type & whole)
: upleft (whole, whole.get_height()/2, whole.get_width()/2, 0, 0),
upright (whole, whole.get_height()/2, whole.get_width() - whole.get_width()/2, 0, whole.get_width()/2),
downleft (whole, whole.get_height() - whole.get_height()/2, whole.get_width()/2, whole.get_height()/2, 0),
downright(whole, whole.get_height() - whole.get_height()/2, whole.get_width() - whole.get_width()/2, whole.get_height()/2, whole.get_width()/2),
ul(upleft), ur(upright), dl(downleft), dr(downright) {}
};
//! calculates C = A * B + C
// requires fitting dimensions
// Chooses the number of Strassen-Winograd levels from the smallest
// dimension of the multiplication, pads all operands so both of their
// dimensions are divisible by 2^num_levels, and dispatches to the
// block-grained feedable implementation. Falls back to the interleaved
// recursion for small inputs or misconfigured level bounds.
static swappable_block_matrix_type&
multi_level_strassen_winograd_multiply_and_add_block_grained(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
int_type num_levels = ilog2_ceil(std::min(A.get_width(), std::min(C.get_width(), C.get_height())));
if (num_levels > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
{
if (num_levels > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS)
num_levels = STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS;
// padded views: dimensions rounded up to multiples of 2^num_levels
swappable_block_matrix_type padded_a(A, round_up_to_power_of_two(A.get_height(), num_levels),
round_up_to_power_of_two(A.get_width(), num_levels), 0, 0),
padded_b(B, round_up_to_power_of_two(B.get_height(), num_levels),
round_up_to_power_of_two(B.get_width(), num_levels), 0, 0),
padded_c(C, round_up_to_power_of_two(C.get_height(), num_levels),
round_up_to_power_of_two(C.get_width(), num_levels), 0, 0);
// BUGFIX: the cases below previously passed (padded_a, padded_a, padded_c),
// computing C += A * A and leaving padded_b unused; they must pass padded_b
// as the second factor (cf. multi_level_strassen_winograd_multiply_and_add).
switch (num_levels)
{
#if (STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS >= 5 && 5 > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
case 5:
use_feedable_sw_block_grained<5>(padded_a, padded_b, padded_c);
break;
#endif
#if (STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS >= 4 && 4 > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
case 4:
use_feedable_sw_block_grained<4>(padded_a, padded_b, padded_c);
break;
#endif
#if (STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS >= 3 && 3 > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
case 3:
use_feedable_sw_block_grained<3>(padded_a, padded_b, padded_c);
break;
#endif
#if (STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS >= 2 && 2 > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
case 2:
use_feedable_sw_block_grained<2>(padded_a, padded_b, padded_c);
break;
#endif
default: // only here in case of wrong bounds
strassen_winograd_multiply_and_add_interleaved(A, B, C);
break;
}
}
else
// base case
strassen_winograd_multiply_and_add_interleaved(A, B, C);
return C;
}
// input matrices have to be padded
// Runs one block-grained Strassen-Winograd multiplication unrolled over
// Level recursion levels: feed the pre-added quadtree views of A and B
// into fsw, multiply, then read the results back (adding into C).
// The quadtree views are scoped so each is destroyed after feeding.
template <unsigned Level>
static void use_feedable_sw_block_grained(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
const unsigned granularity = 1; // block-granularity of the quadtree views
feedable_strassen_winograd_block_grained<ValueType, BlockSideLength, Level, true, true>
fsw(A, 0, 0, C.bs, C.get_height(), C.get_width(), A.get_width(), B, 0, 0);
// preadditions for A
{
matrix_to_quadtree_block_grained<ValueType, BlockSideLength, Level, granularity>
mtq_a(A);
for (size_type row = 0; row < mtq_a.get_height(); ++row)
for (size_type col = 0; col < mtq_a.get_width(); ++col)
fsw.feed_a(row, col, mtq_a(row, col));
}
// preadditions for B
{
matrix_to_quadtree_block_grained<ValueType, BlockSideLength, Level, granularity>
mtq_b(B);
for (size_type row = 0; row < mtq_b.get_height(); ++row)
for (size_type col = 0; col < mtq_b.get_width(); ++col)
fsw.feed_b(row, col, mtq_b(row, col));
}
// recursive multiplications
fsw.multiply();
// postadditions
{
matrix_to_quadtree_block_grained<ValueType, BlockSideLength, Level, granularity>
mtq_c(C);
for (size_type row = 0; row < mtq_c.get_height(); ++row)
for (size_type col = 0; col < mtq_c.get_width(); ++col)
fsw.read_and_add(row, col, mtq_c(row, col));
}
}
//! calculates C = A * B + C
// requires fitting dimensions
// Pads all operands so both of their dimensions are divisible by 2^p,
// where p is derived from the smallest dimension of the multiplication,
// then lets choose_level_for_feedable_sw pick the implementation.
static swappable_block_matrix_type&
multi_level_strassen_winograd_multiply_and_add(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
int_type p = ilog2_ceil(std::min(A.get_width(), std::min(C.get_width(), C.get_height())));
swappable_block_matrix_type padded_a(A, round_up_to_power_of_two(A.get_height(), p),
round_up_to_power_of_two(A.get_width(), p), 0, 0),
padded_b(B, round_up_to_power_of_two(B.get_height(), p),
round_up_to_power_of_two(B.get_width(), p), 0, 0),
padded_c(C, round_up_to_power_of_two(C.get_height(), p),
round_up_to_power_of_two(C.get_width(), p), 0, 0);
choose_level_for_feedable_sw(padded_a, padded_b, padded_c);
return C;
}
// input matrices have to be padded
// Dispatches on ilog2_ceil of the smallest dimension: large inputs use
// the feedable Strassen-Winograd with 2 unrolled levels (levels 3-4 are
// currently disabled, see commented-out cases), tiny inputs use the
// plain recursive multiplication. Keep in sync with
// strassen_winograd_base_case_size (see note at its definition).
static void choose_level_for_feedable_sw(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
switch (ilog2_ceil(std::min(A.get_width(), std::min(C.get_width(), C.get_height()))))
{
default:
/*
use_feedable_sw<4>(A, B, C);
break;
case 3:
use_feedable_sw<3>(A, B, C);
break;
case 2:*/
use_feedable_sw<2>(A, B, C);
break;
case 1:
/*use_feedable_sw<1>(A, B, C);
break;*/
/* fallthrough */
case 0:
// base case
recursive_multiply_and_add(A, B, C);
break;
}
}
// input matrices have to be padded
// Element-grained variant: feeds A and B element by element (per block)
// into the feedable Strassen-Winograd object, multiplies, and streams the
// result elements back, adding them into C. The per-block element loops
// are parallelized with OpenMP when STXXL_PARALLEL is enabled.
template <unsigned Level>
static void use_feedable_sw(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
feedable_strassen_winograd<ValueType, BlockSideLength, Level, true, true>
fsw(A, 0, 0, C.bs, C.get_height(), C.get_width(), A.get_width(), B, 0, 0);
// preadditions for A
matrix_to_quadtree<ValueType, BlockSideLength, Level>
mtq_a(A);
for (size_type block_row = 0; block_row < mtq_a.get_height_in_blocks(); ++block_row)
for (size_type block_col = 0; block_col < mtq_a.get_width_in_blocks(); ++block_col)
{
fsw.begin_feeding_a_block(block_row, block_col,
mtq_a.begin_reading_block(block_row, block_col));
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (int_type element_row_in_block = 0; element_row_in_block < int_type(BlockSideLength); ++element_row_in_block)
for (int_type element_col_in_block = 0; element_col_in_block < int_type(BlockSideLength); ++element_col_in_block)
fsw.feed_a_element(element_row_in_block * BlockSideLength + element_col_in_block,
mtq_a.read_element(element_row_in_block * BlockSideLength + element_col_in_block));
fsw.end_feeding_a_block(block_row, block_col,
mtq_a.end_reading_block(block_row, block_col));
}
// preadditions for B
matrix_to_quadtree<ValueType, BlockSideLength, Level>
mtq_b(B);
for (size_type block_row = 0; block_row < mtq_b.get_height_in_blocks(); ++block_row)
for (size_type block_col = 0; block_col < mtq_b.get_width_in_blocks(); ++block_col)
{
fsw.begin_feeding_b_block(block_row, block_col,
mtq_b.begin_reading_block(block_row, block_col));
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (int_type element_row_in_block = 0; element_row_in_block < int_type(BlockSideLength); ++element_row_in_block)
for (int_type element_col_in_block = 0; element_col_in_block < int_type(BlockSideLength); ++element_col_in_block)
fsw.feed_b_element(element_row_in_block * BlockSideLength + element_col_in_block,
mtq_b.read_element(element_row_in_block * BlockSideLength + element_col_in_block));
fsw.end_feeding_b_block(block_row, block_col,
mtq_b.end_reading_block(block_row, block_col));
}
// recursive multiplications
fsw.multiply();
// postadditions (results are added into C)
matrix_to_quadtree<ValueType, BlockSideLength, Level>
mtq_c(C);
for (size_type block_row = 0; block_row < mtq_c.get_height_in_blocks(); ++block_row)
for (size_type block_col = 0; block_col < mtq_c.get_width_in_blocks(); ++block_col)
{
mtq_c.begin_feeding_block(block_row, block_col,
fsw.begin_reading_block(block_row, block_col));
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (int_type element_row_in_block = 0; element_row_in_block < int_type(BlockSideLength); ++element_row_in_block)
for (int_type element_col_in_block = 0; element_col_in_block < int_type(BlockSideLength); ++element_col_in_block)
mtq_c.feed_and_add_element(element_row_in_block * BlockSideLength + element_col_in_block,
fsw.read_element(element_row_in_block * BlockSideLength + element_col_in_block));
mtq_c.end_feeding_block(block_row, block_col,
fsw.end_reading_block(block_row, block_col));
}
}
//! calculates C = A * B
// assumes fitting dimensions
// Classic recursive Strassen-Winograd: quarters the operands (with
// padding), forms the s/t pre-additions, performs the seven recursive
// products (p2, p4, p6, p7 directly into the C-quarters), then applies
// the u-step post-additions. C is overwritten (set_zero in the base case).
static swappable_block_matrix_type&
strassen_winograd_multiply(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
// base case
if (C.get_height() <= strassen_winograd_base_case_size
|| C.get_width() <= strassen_winograd_base_case_size
|| A.get_width() <= strassen_winograd_base_case_size)
{
C.set_zero();
return recursive_multiply_and_add(A, B, C);
}
// partition matrix
swappable_block_matrix_padding_quarterer qa(A), qb(B), qc(C);
// preadditions
swappable_block_matrix_type s1(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()),
s2(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()),
s3(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()),
s4(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()),
t1(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()),
t2(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()),
t3(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()),
t4(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed());
strassen_winograd_preaddition_a(qa.ul, qa.ur, qa.dl, qa.dr, s1, s2, s3, s4);
strassen_winograd_preaddition_b(qb.ul, qb.ur, qb.dl, qb.dr, t1, t2, t3, t4);
// recursive multiplications
swappable_block_matrix_type p1(C.bs, qc.ul.get_height(), qc.ul.get_width(), qc.ul.is_transposed()),
// p2 stored in qc.ul
p3(C.bs, qc.ul.get_height(), qc.ul.get_width(), qc.ul.is_transposed()),
// p4 stored in qc.dr
p5(C.bs, qc.ul.get_height(), qc.ul.get_width(), qc.ul.is_transposed());
// p6 stored in qc.ur
// p7 stored in qc.dl
strassen_winograd_multiply(qa.ul, qb.ul, p1);
strassen_winograd_multiply(qa.ur, qb.dl, qc.ul);
strassen_winograd_multiply( s1, t1, p3);
strassen_winograd_multiply( s2, t2, qc.dr);
strassen_winograd_multiply( s3, t3, p5);
strassen_winograd_multiply( s4, qb.dr, qc.ur);
strassen_winograd_multiply(qa.dr, t4, qc.dl);
// postadditions
strassen_winograd_postaddition(qc.ul, qc.ur, qc.dl, qc.dr, p1, p3, p5);
return C;
}
//! calculates C = A * B + C
// assumes fitting dimensions
// Strassen-Winograd with interleaved post-additions: only one product
// temporary (px) is live at a time, and each s/t temporary is zeroed
// (freeing its blocks) as soon as it is no longer needed, minimizing
// peak external-memory footprint. The statement order is part of the
// algorithm — do not reorder.
static swappable_block_matrix_type&
strassen_winograd_multiply_and_add_interleaved(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
// base case
if (C.get_height() <= strassen_winograd_base_case_size
|| C.get_width() <= strassen_winograd_base_case_size
|| A.get_width() <= strassen_winograd_base_case_size)
return recursive_multiply_and_add(A, B, C);
// partition matrix
swappable_block_matrix_padding_quarterer qa(A), qb(B), qc(C);
// preadditions
swappable_block_matrix_type s1(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()),
s2(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()),
s3(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()),
s4(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()),
t1(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()),
t2(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()),
t3(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()),
t4(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed());
strassen_winograd_preaddition_a(qa.ul, qa.ur, qa.dl, qa.dr, s1, s2, s3, s4);
strassen_winograd_preaddition_b(qb.ul, qb.ur, qb.dl, qb.dr, t1, t2, t3, t4);
// recursive multiplications and postadditions
swappable_block_matrix_type px(C.bs, qc.ul.get_height(), qc.ul.get_width(), qc.ul.is_transposed());
strassen_winograd_multiply_and_add_interleaved(qa.ur, qb.dl, qc.ul); // p2
strassen_winograd_multiply_and_add_interleaved(qa.ul, qb.ul, px); // p1
element_op<addition>(qc.ul, px);
strassen_winograd_multiply_and_add_interleaved(s2, t2, px); // p4
s2.set_zero();
t2.set_zero();
element_op<addition>(qc.ur, px);
strassen_winograd_multiply_and_add_interleaved(s3, t3, px); // p5
s3.set_zero();
t3.set_zero();
element_op_twice_nontransposed<addition>(qc.dl, qc.dr, px);
px.set_zero();
strassen_winograd_multiply_and_add_interleaved(qa.dr, t4, qc.dl); // p7
t4.set_zero();
strassen_winograd_multiply_and_add_interleaved(s1, t1, px); // p3
s1.set_zero();
t1.set_zero();
element_op_twice_nontransposed<addition>(qc.dr, qc.ur, px);
px.set_zero();
strassen_winograd_multiply_and_add_interleaved(s4, qb.dr, qc.ur); // p6
return C;
}
//! calculates C = A * B + C
// assumes fitting dimensions
// Strassen-Winograd accumulate variant: pre-additions are written out
// explicitly with element_op, the seven products are computed one after
// another into px or directly into the C-quarters, and post-additions
// are interleaved between the products. Statement order is significant.
static swappable_block_matrix_type&
strassen_winograd_multiply_and_add(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
// base case
if (C.get_height() <= strassen_winograd_base_case_size
|| C.get_width() <= strassen_winograd_base_case_size
|| A.get_width() <= strassen_winograd_base_case_size)
return recursive_multiply_and_add(A, B, C);
// partition matrix
swappable_block_matrix_padding_quarterer qa(A), qb(B), qc(C);
// preadditions
swappable_block_matrix_type s1(C.bs, qa.ul.get_height(), qa.ul.get_width()),
s2(C.bs, qa.ul.get_height(), qa.ul.get_width()),
s3(C.bs, qa.ul.get_height(), qa.ul.get_width()),
s4(C.bs, qa.ul.get_height(), qa.ul.get_width()),
t1(C.bs, qb.ul.get_height(), qb.ul.get_width()),
t2(C.bs, qb.ul.get_height(), qb.ul.get_width()),
t3(C.bs, qb.ul.get_height(), qb.ul.get_width()),
t4(C.bs, qb.ul.get_height(), qb.ul.get_width());
element_op<subtraction>(s3, qa.ul, qa.dl);
element_op<addition>(s1, qa.dl, qa.dr);
element_op<subtraction>(s2, s1, qa.ul);
element_op<subtraction>(s4, qa.ur, s2);
element_op<subtraction>(t3, qb.dr, qb.ur);
element_op<subtraction>(t1, qb.ur, qb.ul);
element_op<subtraction>(t2, qb.dr, t1);
element_op<subtraction>(t4, qb.dl, t2);
// recursive multiplications and postadditions
swappable_block_matrix_type px(C.bs, qc.ul.get_height(), qc.ul.get_width());
strassen_winograd_multiply_and_add(qa.ur, qb.dl, qc.ul); // p2
strassen_winograd_multiply_and_add(qa.ul, qb.ul, px); // p1
element_op<addition>(qc.ul, px);
strassen_winograd_multiply_and_add(s2, t2, px); // p4
element_op<addition>(qc.ur, px);
strassen_winograd_multiply_and_add(s3, t3, px); // p5
element_op<addition>(qc.dl, px);
element_op<addition>(qc.dr, px);
px.set_zero();
strassen_winograd_multiply_and_add(qa.dr, t4, qc.dl); // p7
strassen_winograd_multiply_and_add(s1, t1, px); // p3
element_op<addition>(qc.dr, px);
element_op<addition>(qc.ur, px);
strassen_winograd_multiply_and_add(s4, qb.dr, qc.ur); // p6
return C;
}
//! calculates C = A * B + C
// assumes fitting dimensions
// Plain divide-and-conquer block multiplication (no Strassen): quarters
// all operands approximatively and recurses; degenerate shapes (two of
// the three dimensions == 1) fall through to the naive triple loop.
static swappable_block_matrix_type&
recursive_multiply_and_add(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
// catch empty intervals
if (C.get_height() * C.get_width() * A.get_width() == 0)
return C;
// base case
if ((C.get_height() == 1) + (C.get_width() == 1) + (A.get_width() == 1) >= 2)
return naive_multiply_and_add(A, B, C);
// partition matrix
swappable_block_matrix_approximative_quarterer qa(A), qb(B), qc(C);
// recursive multiplication
// The order of recursive calls is optimized to enhance locality. C has priority because it has to be read and written.
recursive_multiply_and_add(qa.ul, qb.ul, qc.ul);
recursive_multiply_and_add(qa.ur, qb.dl, qc.ul);
recursive_multiply_and_add(qa.ur, qb.dr, qc.ur);
recursive_multiply_and_add(qa.ul, qb.ur, qc.ur);
recursive_multiply_and_add(qa.dl, qb.ur, qc.dr);
recursive_multiply_and_add(qa.dr, qb.dr, qc.dr);
recursive_multiply_and_add(qa.dr, qb.dl, qc.dl);
recursive_multiply_and_add(qa.dl, qb.ul, qc.dl);
return C;
}
//! calculates C = A * B + C via the classic triple loop over blocks
// requires fitting dimensions
static swappable_block_matrix_type&
naive_multiply_and_add(const swappable_block_matrix_type& A,
const swappable_block_matrix_type& B,
swappable_block_matrix_type& C)
{
// walk the block-grid of C row-major; for every C-block accumulate the
// inner product of the matching A block-row and B block-column
for (size_type row = 0; row < C.get_height(); ++row)
for (size_type col = 0; col < C.get_width(); ++col)
for (size_type mid = 0; mid < A.get_width(); ++mid)
multiply_and_add_swappable_block(A(row, mid), A.is_transposed(), A.bs,
B(mid, col), B.is_transposed(), B.bs,
C(row, col), C.is_transposed(), C.bs);
return C;
}
// Single-block kernel: c += a * b, respecting each block's orientation.
// If either factor is an uninitialized (zero) block the product is zero
// and nothing is done; the saved work is counted in the statistics.
static void multiply_and_add_swappable_block(
const swappable_block_identifier_type a, const bool a_is_transposed, block_scheduler_type& bs_a,
const swappable_block_identifier_type b, const bool b_is_transposed, block_scheduler_type& bs_b,
const swappable_block_identifier_type c, const bool c_is_transposed, block_scheduler_type& bs_c)
{
if (! bs_c.is_simulating())
++matrix_operation_statistic::get_instance()->block_multiplication_calls;
// check if zero-block (== ! initialized)
if (! bs_a.is_initialized(a) || ! bs_b.is_initialized(b))
{
// => one factor is zero => product is zero
if (! bs_c.is_simulating())
++matrix_operation_statistic::get_instance()->block_multiplications_saved_through_zero;
return;
}
// acquire
ValueType* ap = bs_a.acquire(a).begin(),
* bp = bs_b.acquire(b).begin(),
* cp = bs_c.acquire(c).begin();
// multiply
if (! bs_c.is_simulating())
low_level_matrix_multiply_and_add<ValueType, BlockSideLength>
(ap, a_is_transposed, bp, b_is_transposed, cp, c_is_transposed);
// release (only c was modified)
bs_a.release(a, false);
bs_b.release(b, false);
bs_c.release(c, true);
}
// +-+ end matrix multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// +-+-+-+ matrix-vector multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//! calculates z = A * x
// (adds A * x into z; offsets are in block units and are scaled to
// element offsets by the naive base case)
static column_vector_type&
recursive_matrix_col_vector_multiply_and_add(const swappable_block_matrix_type& A,
const column_vector_type& x, column_vector_type& z,
const vector_size_type offset_x = 0, const vector_size_type offset_z = 0)
{
// catch empty intervals
if (A.get_height() * A.get_width() == 0)
return z;
// base case
if (A.get_height() == 1 || A.get_width() == 1)
return naive_matrix_col_vector_multiply_and_add(A, x, z, offset_x, offset_z);
// partition matrix
swappable_block_matrix_approximative_quarterer qa(A);
// recursive multiplication
// The order of recursive calls is optimized to enhance locality.
recursive_matrix_col_vector_multiply_and_add(qa.ul, x, z, offset_x, offset_z );
recursive_matrix_col_vector_multiply_and_add(qa.ur, x, z, offset_x + qa.ul.get_width(), offset_z );
recursive_matrix_col_vector_multiply_and_add(qa.dr, x, z, offset_x + qa.ul.get_width(), offset_z + qa.ul.get_height());
recursive_matrix_col_vector_multiply_and_add(qa.dl, x, z, offset_x, offset_z + qa.ul.get_height());
return z;
}
// Adds A * x into z block by block; the block offsets are converted to
// element offsets for the per-block kernel.
static column_vector_type&
naive_matrix_col_vector_multiply_and_add(const swappable_block_matrix_type& A,
const column_vector_type& x, column_vector_type& z,
const vector_size_type offset_x = 0, const vector_size_type offset_z = 0)
{
for (size_type brow = 0; brow < A.get_height(); ++brow)
{
const vector_size_type elem_offset_z = (offset_z + brow) * BlockSideLength;
for (size_type bcol = 0; bcol < A.get_width(); ++bcol)
matrix_col_vector_multiply_and_add_swappable_block(A(brow, bcol), A.is_transposed(), A.bs,
x, z, (offset_x + bcol) * BlockSideLength, elem_offset_z);
}
return z;
}
// Single-block kernel: z[offset_z ..] += block_a * x[offset_x ..].
// A zero (uninitialized) block contributes nothing. The row/col limits
// clamp the loops for partial blocks at the vector ends.
static void matrix_col_vector_multiply_and_add_swappable_block(
const swappable_block_identifier_type a, const bool a_is_transposed, block_scheduler_type& bs_a,
const column_vector_type& x, column_vector_type& z,
const vector_size_type offset_x = 0, const vector_size_type offset_z = 0)
{
// check if zero-block (== ! initialized)
if (! bs_a.is_initialized(a))
{
// => matrix is zero => product is zero
return;
}
// acquire
internal_block_type& ia = bs_a.acquire(a);
// multiply
if (! bs_a.is_simulating())
{
int_type row_limit = std::min(BlockSideLength, unsigned(z.size() - offset_z)),
col_limit = std::min(BlockSideLength, unsigned(x.size() - offset_x));
if (a_is_transposed)
// transposed layout: element (row, col) lives at [row + col * side]
for (int_type col = 0; col < col_limit; ++col)
for (int_type row = 0; row < row_limit; ++row)
z[offset_z + row] += x[offset_x + col] * ia[row + col * BlockSideLength];
else
for (int_type row = 0; row < row_limit; ++row)
for (int_type col = 0; col < col_limit; ++col)
z[offset_z + row] += x[offset_x + col] * ia[row * BlockSideLength + col];
}
// release (a was only read)
bs_a.release(a, false);
}
//! calculates z = y * A
// (adds y * A into z; offsets are in block units and are scaled to
// element offsets by the naive base case)
static row_vector_type&
recursive_matrix_row_vector_multiply_and_add(const row_vector_type& y,
const swappable_block_matrix_type& A, row_vector_type& z,
const vector_size_type offset_y = 0, const vector_size_type offset_z = 0)
{
// catch empty intervals
if (A.get_height() * A.get_width() == 0)
return z;
// base case
if (A.get_height() == 1 || A.get_width() == 1)
return naive_matrix_row_vector_multiply_and_add(y, A, z, offset_y, offset_z);
// partition matrix
swappable_block_matrix_approximative_quarterer qa(A);
// recursive multiplication
// The order of recursive calls is optimized to enhance locality.
recursive_matrix_row_vector_multiply_and_add(y, qa.ul, z, offset_y, offset_z );
recursive_matrix_row_vector_multiply_and_add(y, qa.dl, z, offset_y + qa.ul.get_height(), offset_z );
recursive_matrix_row_vector_multiply_and_add(y, qa.dr, z, offset_y + qa.ul.get_height(), offset_z + qa.ul.get_width());
recursive_matrix_row_vector_multiply_and_add(y, qa.ur, z, offset_y, offset_z + qa.ul.get_width());
return z;
}
// Adds y * A into z block by block; the block offsets are converted to
// element offsets for the per-block kernel.
static row_vector_type&
naive_matrix_row_vector_multiply_and_add(const row_vector_type& y, const swappable_block_matrix_type& A,
row_vector_type& z,
const vector_size_type offset_y = 0, const vector_size_type offset_z = 0)
{
for (size_type brow = 0; brow < A.get_height(); ++brow)
{
const vector_size_type elem_offset_y = (offset_y + brow) * BlockSideLength;
for (size_type bcol = 0; bcol < A.get_width(); ++bcol)
matrix_row_vector_multiply_and_add_swappable_block(y, A(brow, bcol), A.is_transposed(), A.bs,
z, elem_offset_y, (offset_z + bcol) * BlockSideLength);
}
return z;
}
// Single-block kernel: z[offset_z ..] += y[offset_y ..] * block_a.
// A zero (uninitialized) block contributes nothing. The row/col limits
// clamp the loops for partial blocks at the vector ends.
static void matrix_row_vector_multiply_and_add_swappable_block(const row_vector_type& y,
const swappable_block_identifier_type a, const bool a_is_transposed, block_scheduler_type& bs_a,
row_vector_type& z,
const vector_size_type offset_y = 0, const vector_size_type offset_z = 0)
{
// check if zero-block (== ! initialized)
if (! bs_a.is_initialized(a))
{
// => matrix is zero => product is zero
return;
}
// acquire
internal_block_type& ia = bs_a.acquire(a);
// multiply
if (! bs_a.is_simulating())
{
int_type row_limit = std::min(BlockSideLength, unsigned(y.size() - offset_y)),
col_limit = std::min(BlockSideLength, unsigned(z.size() - offset_z));
if (a_is_transposed)
// transposed layout: element (row, col) lives at [row + col * side]
for (int_type col = 0; col < col_limit; ++col)
for (int_type row = 0; row < row_limit; ++row)
z[offset_z + col] += ia[row + col * BlockSideLength] * y[offset_y + row];
else
for (int_type row = 0; row < row_limit; ++row)
for (int_type col = 0; col < col_limit; ++col)
z[offset_z + col] += ia[row * BlockSideLength + col] * y[offset_y + row];
}
// release (a was only read)
bs_a.release(a, false);
}
// +-+ end matrix-vector multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// +-+-+-+ vector-vector multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// Fills matrix A with the outer product l * r of the two vectors,
// recursing over approximative quarters; offsets are in block units.
// NOTE(review): A is taken by value (a copied view), unlike the
// by-reference siblings above — presumably cheap, but confirm intended.
static void recursive_matrix_from_vectors(swappable_block_matrix_type A, const column_vector_type& l,
const row_vector_type& r, vector_size_type offset_l = 0, vector_size_type offset_r = 0)
{
// catch empty intervals
if (A.get_height() * A.get_width() == 0)
return;
// base case
if (A.get_height() == 1 || A.get_width() == 1)
{
naive_matrix_from_vectors(A, l, r, offset_l, offset_r);
return;
}
// partition matrix
swappable_block_matrix_approximative_quarterer qa(A);
// recursive creation
// The order of recursive calls is optimized to enhance locality.
recursive_matrix_from_vectors(qa.ul, l, r, offset_l, offset_r );
recursive_matrix_from_vectors(qa.ur, l, r, offset_l, offset_r + qa.ul.get_width());
recursive_matrix_from_vectors(qa.dr, l, r, offset_l + qa.ul.get_height(), offset_r + qa.ul.get_width());
recursive_matrix_from_vectors(qa.dl, l, r, offset_l + qa.ul.get_height(), offset_r );
}
// Fills each block of A with the outer product of the matching segments
// of l and r; block offsets are converted to element offsets.
static void naive_matrix_from_vectors(swappable_block_matrix_type A, const column_vector_type& l,
const row_vector_type& r, vector_size_type offset_l = 0, vector_size_type offset_r = 0)
{
for (size_type brow = 0; brow < A.get_height(); ++brow)
{
const vector_size_type elem_offset_l = (offset_l + brow) * BlockSideLength;
for (size_type bcol = 0; bcol < A.get_width(); ++bcol)
matrix_from_vectors_swappable_block(A(brow, bcol), A.is_transposed(), A.bs,
l, r, elem_offset_l, (offset_r + bcol) * BlockSideLength);
}
}
// Single-block kernel: overwrite block a with the outer product of the
// vector segments l[offset_l ..] and r[offset_r ..]. The block is
// acquired uninitialized (acquire(a, true)) because every covered element
// is written; limits clamp the loops for partial edge blocks.
static void matrix_from_vectors_swappable_block(swappable_block_identifier_type a,
const bool a_is_transposed, block_scheduler_type& bs_a,
const column_vector_type& l, const row_vector_type& r,
vector_size_type offset_l, vector_size_type offset_r)
{
// acquire (uninitialized — contents are fully overwritten)
internal_block_type& ia = bs_a.acquire(a, true);
// multiply
if (! bs_a.is_simulating())
{
int_type row_limit = std::min(BlockSideLength, unsigned(l.size() - offset_l)),
col_limit = std::min(BlockSideLength, unsigned(r.size() - offset_r));
if (a_is_transposed)
// transposed layout: element (row, col) lives at [row + col * side]
for (int_type col = 0; col < col_limit; ++col)
for (int_type row = 0; row < row_limit; ++row)
ia[row + col * BlockSideLength] = l[row + offset_l] * r[col + offset_r];
else
for (int_type row = 0; row < row_limit; ++row)
for (int_type col = 0; col < col_limit; ++col)
ia[row * BlockSideLength + col] = l[row + offset_l] * r[col + offset_r];
}
// release (block was written)
bs_a.release(a, true);
}
// +-+ end vector-vector multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
};
// Adjust choose_level_for_feedable_sw, too!
template <typename ValueType, unsigned BlockSideLength>
const int_type matrix_operations<ValueType, BlockSideLength>::strassen_winograd_base_case_size = 3;
} // namespace matrix_local
STXXL_END_NAMESPACE
#endif // !STXXL_CONTAINERS_MATRIX_ARITHMETIC_HEADER
|
GB_unaryop__lnot_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_uint64
// op(A') function: GB_tran__lnot_uint64_uint64
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = lnot (cast (Ax [p])) for all p in [0, anz), in parallel.
// Auto-generated kernel: the per-element work is defined by the GB_CAST_OP /
// GB_OP / GB_GETA macros above; here cij = !(aij != 0) on uint64_t.
GrB_Info GB_unop__lnot_uint64_uint64
(
uint64_t *restrict Cx,              // output array, length anz
const uint64_t *restrict Ax,        // input array, length anz
int64_t anz,                        // number of entries
int nthreads                        // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
// every iteration is independent, so a static schedule divides the work evenly
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;     // Cx [p] = !(((uint64_t) Ax [p]) != 0)
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A, typecast, and apply the unary operator.
// Auto-generated kernel: the transpose loop itself lives in the included
// template GB_unaryop_transpose.c, specialized by the macros defined above.
GrB_Info GB_tran__lnot_uint64_uint64
(
GrB_Matrix C,                       // output matrix
const GrB_Matrix A,                 // input matrix, transposed into C
int64_t **Rowcounts,                // per-slice row counts — presumably workspace; see template
GBI_single_iterator Iter,           // iterator over A's vectors
const int64_t *restrict A_slice,    // slice boundaries for parallel work
int naslice                         // number of slices
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
facedetectcnn.h | /*
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement For libfacedetection
(3-clause BSD License)
Copyright (c) 2018-2019, Shiqi Yu, all rights reserved.
shiqi.yu@gmail.com
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#pragma once
#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU
int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!!
unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be RGB (three-channel) image!
//DO NOT EDIT the following code if you don't really understand it.
#if defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif
#if defined(_ENABLE_NEON)
#include "arm_neon.h"
#define _ENABLE_INT8_CONV
#endif
#if defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif
#if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON)
#error Cannot enable the two of SSE2 AVX and NEON at the same time.
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <string.h>
#include <vector>
#include <iostream>
using namespace std;
void* myAlloc(size_t size);
void myFree_(void* ptr);
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0);
#ifndef MIN
# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
// One detected face: confidence score plus its bounding box in image pixels.
typedef struct FaceRect_
{
float score;    // detection confidence
int x;          // left coordinate of the bounding box
int y;          // top coordinate of the bounding box
int w;          // box width in pixels
int h;          // box height in pixels
}FaceRect;
// A 3-D data blob (width x height x channels) holding a float tensor and a
// quantized int8 copy of the same data. Storage is pixel-interleaved: the
// channel vector of element (x, y) starts at
//   data_* + (y * width + x) * *ChannelStepInByte / sizeof(element).
// The per-pixel stride is padded up to _MALLOC_ALIGN bits so each channel
// vector is aligned for SIMD loads.
// NOTE(review): the class owns raw pointers but declares no copy constructor
// or copy assignment; copying a CDataBlob by value would double-free in the
// destructor — confirm callers only pass it by pointer/reference.
class CDataBlob
{
public:
float * data_float;             // owned float tensor (may be padded per pixel)
signed char * data_int8;        // owned quantized int8 tensor
int width;
int height;
int channels;
int floatChannelStepInByte;     // padded per-pixel stride of data_float, in bytes
int int8ChannelStepInByte;      // padded per-pixel stride of data_int8, in bytes
float int8float_scale;          // int8 element * scale approximates the float value
bool int8_data_valid;           // true when data_int8 holds a valid quantization
public:
// Default constructor: empty blob, no allocation.
CDataBlob() {
data_float = 0;
data_int8 = 0;
width = 0;
height = 0;
channels = 0;
floatChannelStepInByte = 0;
int8ChannelStepInByte = 0;
int8float_scale = 1.0f;
int8_data_valid = false;
}
// Allocating constructor: immediately creates a w x h x c blob.
// NOTE(review): create()'s failure result is ignored here; data pointers may
// be NULL after construction if allocation failed.
CDataBlob(int w, int h, int c)
{
data_float = 0;
data_int8 = 0;
create(w, h, c);
}
~CDataBlob()
{
setNULL();
}
// Free both buffers (if any) and reset all metadata to the empty state.
void setNULL()
{
if (data_float)
myFree(&data_float);
if (data_int8)
myFree(&data_int8);
width = height = channels = floatChannelStepInByte = int8ChannelStepInByte = 0;
int8float_scale = 1.0f;
int8_data_valid = false;
}
// Allocate both the float and the int8 buffer for a w x h x c blob and zero
// the padding bytes of every pixel. Returns false if either allocation fails.
// NOTE(review): on partial failure the successfully allocated buffer is not
// freed here; it is reclaimed by the next setNULL()/create()/destructor.
bool create(int w, int h, int c)
{
setNULL();
width = w;
height = h;
channels = c;
//alloc space for float array
// round the per-pixel byte count up to a multiple of _MALLOC_ALIGN bits
int remBytes = (sizeof(float)* channels) % (_MALLOC_ALIGN / 8);
if (remBytes == 0)
floatChannelStepInByte = channels * sizeof(float);
else
floatChannelStepInByte = (channels * sizeof(float)) + (_MALLOC_ALIGN / 8) - remBytes;
data_float = (float*)myAlloc(width * height * floatChannelStepInByte);
//alloc space for int8 array
remBytes = (sizeof(char)* channels) % (_MALLOC_ALIGN / 8);
if (remBytes == 0)
int8ChannelStepInByte = channels * sizeof(char);
else
int8ChannelStepInByte = (channels * sizeof(char)) + (_MALLOC_ALIGN / 8) - remBytes;
data_int8 = (signed char*)myAlloc(width * height * int8ChannelStepInByte);
if (data_float == NULL)
{
cerr << "Cannot alloc memeory for float data blob: "
<< width << "*"
<< height << "*"
<< channels << endl;
return false;
}
if (data_int8 == NULL)
{
cerr << "Cannot alloc memeory for uint8 data blob: "
<< width << "*"
<< height << "*"
<< channels << endl;
return false;
}
//memset(data_float, 0, width * height * floatChannelStepInByte);
//memset(data_int8, 0, width * height * int8ChannelStepInByte);
//the following code is faster than memset
//but not only the padding bytes are set to zero.
//BE CAREFUL!!!
//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
// zero only the padding channels [channels, pixel_end) of each pixel; the
// real channel data is left uninitialized and must be filled by a setter
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
int pixel_end = this->floatChannelStepInByte / sizeof(float);
float * pF = (float*)(this->data_float + (r * this->width + c) * this->floatChannelStepInByte/sizeof(float));
for (int ch = this->channels; ch < pixel_end; ch++)
pF[ch] = 0;
pixel_end = this->int8ChannelStepInByte / sizeof(char);
char * pI = (char*)(this->data_int8 + (r * this->width + c) * this->int8ChannelStepInByte/sizeof(char));
for (int ch = this->channels; ch < pixel_end; ch++)
pI[ch] = 0;
}
}
return true;
}
// Copy int8 data laid out in Caffe planar order (channel-major: all of
// channel 0, then channel 1, ...) into this blob's pixel-interleaved layout.
// Dimensions must already match; the blob is not (re)allocated here.
bool setInt8DataFromCaffeFormat(signed char * pData, int dataWidth, int dataHeight, int dataChannels)
{
if (pData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (dataWidth != this->width ||
dataHeight != this->height ||
dataChannels != this->channels)
{
cerr << "The dim of the data can not match that of the Blob." << endl;
return false;
}
//create(dataWidth, dataHeight, dataChannels);
for(int row = 0; row < height; row++)
for (int col = 0; col < width; col++)
{
signed char * p = (this->data_int8 + (width * row + col) * int8ChannelStepInByte /sizeof(char));
for (int ch = 0; ch < channels; ch++)
{
p[ch] = pData[ch * height * width + row * width + col];
}
}
return true;
}
// Float counterpart of setInt8DataFromCaffeFormat: planar (Caffe) source
// into this blob's pixel-interleaved float buffer. Dimensions must match.
bool setFloatDataFromCaffeFormat(float * pData, int dataWidth, int dataHeight, int dataChannels)
{
if (pData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (dataWidth != this->width ||
dataHeight != this->height ||
dataChannels != this->channels)
{
cerr << "The dim of the data can not match that of the Blob." << endl;
return false;
}
//create(dataWidth, dataHeight, dataChannels);
for (int row = 0; row < height; row++)
for (int col = 0; col < width; col++)
{
float * p = (this->data_float + (width * row + col) * floatChannelStepInByte / sizeof(float));
for (int ch = 0; ch < channels; ch++)
{
p[ch] = pData[ch * height * width + row * width + col];
}
}
return true;
}
// (Re)allocate to the image size and fill the float buffer with the image
// pixels, subtracting the per-channel mean. imgWidthStep is the source row
// stride in bytes.
bool setDataFromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep,
int * pChannelMean)
{
if (imgData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (pChannelMean == NULL)
{
cerr << "The mean values is null." << endl;
return false;
}
// NOTE(review): create()'s return value is ignored; on allocation failure
// the loop below would dereference a NULL data_float — confirm upstream.
create(imgWidth, imgHeight, imgChannels);
//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
for (int r = 0; r < imgHeight; r++)
{
for (int c = 0; c < imgWidth; c++)
{
const unsigned char * pImgData = imgData + imgWidthStep * r + imgChannels * c;
float * pBlobData = this->data_float + (this->width * r + c) * this->floatChannelStepInByte /sizeof(float);
for (int ch = 0; ch < imgChannels; ch++)
pBlobData[ch] = (float)(pImgData[ch] - pChannelMean[ch]);
}
}
return true;
}
// Rearrange a 3-channel image so that a 3x3 stride-2 pad-1 convolution on it
// becomes a 1x1 stride-1 convolution on this blob: output is
// ((w+1)/2 x (h+1)/2 x 27), where the 27 channels are the 9 filter taps times
// 3 image channels. Border taps that fall outside the image stay zero.
bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep,
int * pChannelMean)
{
if (imgData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (pChannelMean == NULL)
{
cerr << "The mean values is null." << endl;
return false;
}
if (imgChannels != 3)
{
cerr << "The input image must be a 3-channel RGB image." << endl;
return false;
}
create((imgWidth+1)/2, (imgHeight+1)/2, 27);
//since the pixel assignment cannot fill all the elements in the blob.
//some elements in the blob should be initialized to 0
memset(data_float, 0, width * height * floatChannelStepInByte);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
float * pData = this->data_float + (r * this->width + c) * this->floatChannelStepInByte / sizeof(float);
for (int fy = -1; fy <= 1; fy++)
{
int srcy = r * 2 + fy;
if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
continue;
for (int fx = -1; fx <= 1; fx++)
{
int srcx = c * 2 + fx;
if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
continue;
const unsigned char * pImgData = imgData + imgWidthStep * srcy + imgChannels * srcx;
int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
pData[output_channel_offset] = (float)(pImgData[0] - pChannelMean[0]);
pData[output_channel_offset+1] = (float)(pImgData[1] - pChannelMean[1]);
pData[output_channel_offset+2] = (float)(pImgData[2] - pChannelMean[2]);
}
}
}
}
return true;
}
// Bounds-checked read of one float element; returns 0.f for out-of-range
// coordinates or when no float buffer is allocated.
float getElementFloat(int x, int y, int channel)
{
if (this->data_float)
{
if (x >= 0 && x < this->width &&
y >= 0 && y < this->height &&
channel >= 0 && channel < this->channels)
{
float * p = (float*)(this->data_float + (y*this->width + x)*this->floatChannelStepInByte / sizeof(float));
return p[channel];
}
}
return 0.f;
}
// Bounds-checked read of one int8 element; returns 0 for out-of-range
// coordinates or when the int8 buffer is absent/invalid.
int getElementint8(int x, int y, int channel)
{
if (this->data_int8 && this->int8_data_valid)
{
if (x >= 0 && x < this->width &&
y >= 0 && y < this->height &&
channel >= 0 && channel < this->channels)
{
signed char * p = this->data_int8 + (y*this->width + x)*this->int8ChannelStepInByte/sizeof(char);
return p[channel];
}
}
return 0;
}
// Debug printer: dumps dimensions and the full float buffer channel by
// channel (the int8 buffer is not printed).
friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob)
{
output << "DataBlob Size (Width, Height, Channel) = ("
<< dataBlob.width
<< ", " << dataBlob.height
<< ", " << dataBlob.channels
<< ")" << endl;
for (int ch = 0; ch < dataBlob.channels; ch++)
{
output << "Channel " << ch << ": " << endl;
for (int row = 0; row < dataBlob.height; row++)
{
output << "(";
for (int col = 0; col < dataBlob.width; col++)
{
float * p = (dataBlob.data_float + (dataBlob.width * row + col) * dataBlob.floatChannelStepInByte/sizeof(float));
output << p[ch];
if (col != dataBlob.width - 1)
output << ", ";
}
output << ")" << endl;
}
}
return output;
}
};
// A convolution layer's filter bank plus its geometry and quantization scale.
// NOTE(review): holds raw CDataBlob pointers with no destructor — ownership of
// the filter blobs presumably lies with the caller; verify.
class Filters {
public:
vector<CDataBlob *> filters;    // one blob per output channel
int pad;                        // spatial zero-padding applied by convolution()
int stride;                     // convolution stride
float scale; //element * scale = original value
};
// Network layer primitives operating on CDataBlob. Each returns false on failure.
bool convolution(CDataBlob *inputData, const Filters* filters, CDataBlob *outputData);
bool maxpooling2x2S2(const CDataBlob *inputData, CDataBlob *outputData);    // 2x2 max pooling, stride 2
bool concat4(const CDataBlob *inputData1, const CDataBlob *inputData2, const CDataBlob *inputData3, const CDataBlob *inputData4, CDataBlob *outputData);    // concatenate four blobs — along channels, presumably; verify in implementation
bool scale(CDataBlob * dataBlob, float scale);    // in-place multiply by a scalar
bool relu(const CDataBlob *inputOutputData);      // in-place ReLU activation
bool priorbox(const CDataBlob * featureData, const CDataBlob * imageData, int num_sizes, float * pWinSizes, CDataBlob * outputData);    // SSD-style prior (anchor) box generation
bool normalize(CDataBlob * inputOutputData, float * pScale);
bool blob2vector(const CDataBlob * inputData, CDataBlob * outputData, bool isFloat);    // flatten a blob into a 1x1xN vector blob
bool detection_output(const CDataBlob * priorbox, const CDataBlob * loc, const CDataBlob * conf, float overlap_threshold, float confidence_threshold, int top_k, int keep_top_k, CDataBlob * outputData);    // decode boxes + NMS
/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
bool softmax1vector2class(const CDataBlob *inputOutputData);
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int with, int height, int step);    // full pipeline; note: "with" is a typo for "width" in this declaration's parameter name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.