source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
flush2.c | //Transpose without locks or critical
#include<stdio.h>
#include<time.h>
#include<omp.h>
/*
 * In-place transpose of a 5x5 matrix in parallel (no locks or critical
 * sections): each swap touches a distinct (i,j)/(j,i) pair below the
 * diagonal, so iterations never conflict.  The result is verified against
 * a serially computed transpose, then the transpose is added to a second
 * matrix in parallel.
 *
 * Fixes over the original: standard `int main`, the verification loop now
 * stops completely on the first mismatch (the old `break` only left the
 * inner loop), elapsed time is converted to milliseconds to match its
 * label, and dead commented-out input code was removed.
 */
int main(void)
{
    int a[5][5], b[5][5], c[5][5], d[5][5], flag = 1;

    /* Express mode: a = 1..25 row-major, b = all ones. */
    int l = 1;
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            a[i][j] = l;
            b[i][j] = 1;
            l++;
        }
    }

    printf("\nThe First Matrix is:\n");
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            printf("%d\t", a[i][j]);
        }
        printf("\n");
    }

    printf("\nThe Second Matrix is:\n");
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            printf("%d\t", b[i][j]);
        }
        printf("\n");
    }

    /* Serial reference transpose used to validate the parallel result. */
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            d[j][i] = a[i][j];
        }
    }

    clock_t begin = clock();

    #pragma omp parallel num_threads(5)
    {
        int temp;   /* private per thread: declared inside the region */
        #pragma omp for
        for (int i = 0; i < 5; i++) {
            int id = omp_get_thread_num();
            for (int j = 0; j < i; j++) {
                temp = a[i][j];
                a[i][j] = a[j][i];
                a[j][i] = temp;
            }
            printf("Thread %d\n", id);
        }
    }

    printf("\nTranspose of First Matrix:\n");
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            printf("%d\t", a[i][j]);
        }
        printf("\n");
    }

    /* Verify against the reference; stop both loops on first mismatch
     * (bug fix: the original break only exited the inner loop). */
    for (int i = 0; i < 5 && flag; i++) {
        for (int j = 0; j < 5; j++) {
            if (a[i][j] != d[i][j]) {
                flag = 0;
                break;
            }
        }
    }

    if (flag == 1) {
        #pragma omp parallel num_threads(5)
        {
            #pragma omp for
            for (int i = 0; i < 5; i++) {
                int id = omp_get_thread_num();
                for (int j = 0; j < 5; j++) {
                    c[i][j] = a[i][j] + b[i][j];
                }
                printf("Thread %d\n", id);
            }
        }
        clock_t end = clock();
        /* Bug fix: value was in seconds but labeled "ms" -- convert. */
        double time_spent = 1000.0 * (double)(end - begin) / CLOCKS_PER_SEC;
        printf("CPU Time used = %lfms", time_spent);
        printf("\nSum Matrix Is:\n");
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < 5; j++) {
                printf("%d\t", c[i][j]);
            }
            printf("\n");
        }
    } else {
        printf("ERROR!!!!!!!!");
    }
    return 0;
}
|
GB_unaryop__abs_int16_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int16_int8
// op(A') function: GB_tran__abs_int16_int8
// C type: int16_t
// A type: int8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Elementwise apply: Cx [p] = GB_IABS ((int16_t) Ax [p]) for p = 0..anz-1.
// Cx and Ax may be aliased: each entry is read before its own slot is
// written, and iterations are independent.
GrB_Info GB_unop__abs_int16_int8
(
    int16_t *Cx,        // output array, anz entries; may alias Ax
    int8_t *Ax,         // input array, anz entries
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Independent iterations: static scheduling splits the range evenly.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // expands to: Cx [p] = GB_IABS ((int16_t) Ax [p])
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int8 -> int16, and apply abs.
// The loop body lives in the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above in this file.
GrB_Info GB_tran__abs_int16_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Phase 2 of the two-phase transpose template; see the template file
    // for the meaning of Rowcounts / Iter / A_slice / naslice.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Stencil_par1.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "malloc2D.h"
#include "timer.h"
/*
 * 2-D 5-point stencil (blur) demo offloaded via OpenMP target directives.
 * Timing accumulates only the kernel bursts, not the progress printing.
 * Fix over the original: cpu_time was declared uninitialized and then read
 * by `+=` (undefined behavior); it now starts at 0.0.  The unused
 * tstop_cpu local was removed and main now returns 0.
 */
int main(int argc, char *argv[])
{
    struct timespec tstart_cpu;
    double cpu_time = 0.0;              /* bug fix: was read uninitialized */

    int imax = 2002, jmax = 2002;       /* grid size including 1-cell halo */
    int niter = 1000, nburst = 100;     /* report progress every nburst iters */

    double** restrict x = malloc2D(jmax, imax);
    double** restrict xnew = malloc2D(jmax, imax);

    /* Initialize the field to 5.0 and the scratch array to 0.0. */
#pragma omp target teams distribute parallel for simd \
    map(x[0:jmax][0:imax], xnew[0:jmax][0:imax])
    for (int j = 0; j < jmax; j++){
        for (int i = 0; i < imax; i++){
            xnew[j][i] = 0.0;
            x[j][i] = 5.0;
        }
    }

    /* Hot spot near the center of the grid.
     * NOTE(review): the i range spans 4 cells (imax/2-5 .. imax/2-2) while
     * the j range spans 10; `imax/2 - 1` may have been intended to be
     * `imax/2 + 5` -- confirm before changing. */
#pragma omp target teams distribute parallel for simd \
    map(x[0:jmax][0:imax], xnew[0:jmax][0:imax])
    for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){
        for (int i = imax/2 - 5; i < imax/2 - 1; i++){
            x[j][i] = 400.0;
        }
    }

    for (int iter = 0; iter < niter; iter += nburst){
        for (int ib = 0; ib < nburst; ib++){
            cpu_timer_start(&tstart_cpu);
            /* 5-point average of each interior cell into xnew. */
#pragma omp target teams distribute parallel for simd \
    map(x[0:jmax][0:imax], xnew[0:jmax][0:imax])
            for (int j = 1; j < jmax-1; j++){
                for (int i = 1; i < imax-1; i++){
                    xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0;
                }
            }
            /* Copy xnew back into x for the next sweep. */
#pragma omp target teams distribute parallel for simd \
    map(x[0:jmax][0:imax], xnew[0:jmax][0:imax])
            for (int j = 0; j < jmax; j++){
                for (int i = 0; i < imax; i++){
                    x[j][i] = xnew[j][i];
                }
            }
            cpu_time += cpu_timer_stop(tstart_cpu);
        }
        printf("Iter %d\n", iter + nburst);
    }

    /* malloc2D presumably hands back one contiguous allocation per array
     * so a single free() suffices -- confirm against malloc2D.h. */
    free(x);
    free(xnew);
    printf("Timing is %lf\n", cpu_time);
    return 0;
}
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include "box.h"
#include "http_stream.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern int check_mistakes;
#define NUMCHARS 37
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
list *get_paths(char *filename)
{
char *path;
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
list *lines = make_list();
while((path=fgetl(file))){
list_insert(lines, path);
}
fclose(file);
return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = random_gen()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/* Pick n paths that advance sequentially through a time-ordered dataset
 * (for video training): each of the `mini_batch` time lines starts at a
 * random frame and steps forward by a random stride `speed`
 * (1..augment_speed) each time it is sampled.  With `contrastive`, time
 * lines 2k and 2k+1 share a starting frame so they form positive pairs.
 * Returned array is caller-freed; the strings are borrowed from `paths`.
 * The mutex serializes this sampling across loader threads -- presumably
 * to protect shared RNG state; confirm against random_gen(). */
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed, int contrastive)
{
    int speed = rand_int(1, augment_speed);
    if (speed < 1) speed = 1;
    char** sequentia_paths = (char**)xcalloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    //printf("n = %d, mini_batch = %d \n", n, mini_batch);
    unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int));
    for (i = 0; i < mini_batch; ++i) {
        // Contrastive pairs share a starting frame.
        if (contrastive && (i % 2) == 1) start_time_indexes[i] = start_time_indexes[i - 1];
        else start_time_indexes[i] = random_gen() % m;
        //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]);
    }
    for (i = 0; i < n; ++i) {
        do {
            int time_line_index = i % mini_batch;
            // Wrap around the dataset, then advance this time line by `speed`.
            unsigned int index = start_time_indexes[time_line_index] % m;
            start_time_indexes[time_line_index] += speed;
            //int index = random_gen() % m;
            sequentia_paths[i] = paths[index];
            //printf(" index = %d, ", index);
            //if(i == 0) printf("%s\n", paths[index]);
            //printf(" index = %u - grp: %s \n", index, paths[index]);
            if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]);
        } while (strlen(sequentia_paths[i]) == 0);   // empty path: re-sample (next frame)
    }
    free(start_time_indexes);
    pthread_mutex_unlock(&mutex);
    return sequentia_paths;
}
/* Pick n paths uniformly at random (with replacement) from paths[0..m-1].
 * If `contrastive`, elements 2k and 2k+1 receive the same path so they can
 * serve as a positive pair.  Empty paths are re-drawn; suspiciously short
 * paths (<= 4 chars) are only warned about.  Caller frees the returned
 * array; the strings remain owned by `paths`. */
char **get_random_paths_custom(char **paths, int n, int m, int contrastive)
{
    char **picked = (char**)xcalloc(n, sizeof(char*));
    pthread_mutex_lock(&mutex);
    int prev_index = 0;
    for (int i = 0; i < n; ++i) {
        do {
            int index = random_gen() % m;
            if (contrastive && (i % 2 == 1)) {
                index = prev_index;       /* pair with the previous draw */
            } else {
                prev_index = index;
            }
            picked[i] = paths[index];
            if (strlen(picked[i]) <= 4) printf(" Very small path to the image: %s \n", picked[i]);
        } while (strlen(picked[i]) == 0);
    }
    pthread_mutex_unlock(&mutex);
    return picked;
}
/* Convenience wrapper: n uniformly random paths, non-contrastive. */
char **get_random_paths(char **paths, int n, int m)
{
    return get_random_paths_custom(paths, n, m, /*contrastive=*/0);
}
/* Return a new array of n strings where each paths[i] has `find` replaced
 * by `replace`.  Caller frees both the array and each string. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **out = (char**)xcalloc(n, sizeof(char*));
    for (int i = 0; i < n; ++i) {
        char buffer[4096];
        find_replace(paths[i], find, replace, buffer);
        out[i] = copy_string(buffer);
    }
    return out;
}
/* Load n images at w x h, convert each to grayscale, and pack the pixel
 * data into a matrix with one flattened image per row.  The matrix takes
 * ownership of each image's data buffer. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.cols = 0;
    X.vals = (float**)xcalloc(n, sizeof(float*));
    for (int i = 0; i < n; ++i) {
        image color = load_image(paths[i], w, h, 3);
        image gray = grayscale_image(color);
        free_image(color);
        X.vals[i] = gray.data;
        X.cols = gray.h * gray.w * gray.c;   /* all images share one size */
    }
    return X;
}
/* Load n RGB images resized to w x h into a matrix, one flattened image
 * per row.  The matrix takes ownership of each image's data buffer. */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.cols = 0;
    X.vals = (float**)xcalloc(n, sizeof(float*));
    for (int i = 0; i < n; ++i) {
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h * im.w * im.c;   /* all images share one size */
    }
    return X;
}
/* Load n images with random augmentation (rotate/aspect/crop via
 * random_augment_image, optional horizontal flip, HSV distortion) and
 * resize each to w x h.  With `contrastive`, rows 2k and 2k+1 load the
 * same source image (augmented independently).  One flattened image per
 * matrix row; the matrix owns the pixel buffers. */
matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int dontuse_opencv, int contrastive)
{
    matrix X;
    X.rows = n;
    X.cols = 0;
    X.vals = (float**)xcalloc(n, sizeof(float*));
    for (int i = 0; i < n; ++i) {
        const int img_index = (contrastive) ? (i / 2) : i;

        image orig;
        if (dontuse_opencv) {
            orig = load_image_stb_resize(paths[img_index], 0, 0, 3);
        } else {
            orig = load_image_color(paths[img_index], 0, 0);
        }

        const int target = w > h ? w : h;
        image crop = random_augment_image(orig, angle, aspect, min, max, target);
        if (use_flip && random_gen() % 2) {
            flip_image(crop);
        }
        random_distort_image(crop, hue, saturation, exposure);
        image sized = resize_image(crop, w, h);

        free_image(orig);
        free_image(crop);
        X.vals[i] = sized.data;
        X.cols = sized.h * sized.w * sized.c;
    }
    return X;
}
/* Read YOLO-format box labels ("id x y w h" per line, coordinates relative
 * to image size) from `filename`.  On success *n is the box count and the
 * returned caller-owned array holds one box_label per line, each tagged
 * with a track id offset by a per-file hash.  If the label file is missing,
 * the filename is appended to bad.list, *n is set to 0, and a 1-element
 * allocation is returned (caller still frees it).
 * Bug fix: the fopen of bad.list was unchecked, so fwrite dereferenced
 * NULL when it failed (e.g. read-only working directory). */
box_label *read_boxes(char *filename, int *n)
{
    box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label));
    FILE *file = fopen(filename, "r");
    if (!file) {
        printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename);
        //file_error(filename);
        FILE* fw = fopen("bad.list", "a");
        if (fw) {   /* bug fix: skip logging instead of crashing when bad.list can't be opened */
            fwrite(filename, sizeof(char), strlen(filename), fw);
            char *new_line = "\n";
            fwrite(new_line, sizeof(char), strlen(new_line), fw);
            fclose(fw);
        }
        if (check_mistakes) {
            printf("\n Error in read_boxes() \n");
            getchar();
        }
        *n = 0;
        return boxes;
    }
    const int max_obj_img = 4000;// 30000;
    /* Deterministic per-file offset so track ids from different label
     * files rarely collide. */
    const int img_hash = (custom_hash(filename) % max_obj_img)*max_obj_img;
    float x, y, h, w;
    int id;
    int count = 0;
    while (fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5) {
        boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label));
        boxes[count].track_id = count + img_hash;
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        /* Derived extents in relative coordinates. */
        boxes[count].left = x - w/2;
        boxes[count].right = x + w/2;
        boxes[count].top = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}
/* Shuffle the boxes in place by swapping each slot with a uniformly
 * random one (safe for n == 0: the loop body never runs). */
void randomize_boxes(box_label *b, int n)
{
    for (int i = 0; i < n; ++i) {
        int j = random_gen() % n;
        box_label tmp = b[i];
        b[i] = b[j];
        b[j] = tmp;
    }
}
/* Remap each box through the crop/scale transform (scale by sx,sy then
 * shift by -dx,-dy) and optional horizontal flip that were applied to the
 * image, clamping results to the [0,1] image area.  Boxes that encode
 * "no box" (x==y==0) or lie fully outside the image are overwritten with
 * the sentinel 999999 so later stages can detect and skip them. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        // (0,0) center means "empty slot" -- mark invalid.
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        // Box entirely outside the unit image area -- mark invalid.
        if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 ||
            (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1)
        {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        // Transform the edge coordinates, then mirror them if flipped.
        boxes[i].left = boxes[i].left * sx - dx;
        boxes[i].right = boxes[i].right * sx - dx;
        boxes[i].top = boxes[i].top * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;
        if(flip){
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }
        // Clamp edges into the image, then rebuild center/size from them.
        boxes[i].left = constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top = constrain(0, 1, boxes[i].top);
        boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);
        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}
/* Fill the SWAG-style truth vector for one image: up to 30 boxes, each in
 * a (4+classes)-wide slot laid out as [x, y, w, h, one-hot class].  Boxes
 * are read from the label file derived from `path`, shuffled, and remapped
 * through the augmentation parameters (dx, dy, sx, sy, flip). */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count && i < 30; ++i) {   // truth holds at most 30 boxes
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        // NOTE(review): threshold is 0.0 here while fill_truth_region uses
        // .001 -- possibly a typo; confirm the intended minimum size.
        if (w < .0 || h < .0) continue;
        int index = (4+classes) * i;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}
/* Fill the region-grid truth for one image: the image is divided into
 * num_boxes x num_boxes cells, each with a (5+classes)-wide slot laid out
 * as [objectness, one-hot class, x, y, w, h], where (x,y) are offsets of
 * the box center within its cell.  Only the first box landing in a cell is
 * kept.  Boxes come from the label file matching `path`, shuffled and
 * remapped through the augmentation parameters. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        if (w < .001 || h < .001) continue;   // skip degenerate boxes
        // Cell containing the box center; (x,y) become in-cell offsets.
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);
        x = x*num_boxes - col;
        y = y*num_boxes - row;
        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;           // cell already claimed
        truth[index++] = 1;                   // objectness flag
        if (id < classes) truth[index+id] = 1;
        index += classes;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}
/* Fill the YOLO detection truth for one image: up to num_boxes boxes, each
 * in a truth_size-wide slot laid out as [x, y, w, h, class_id, track_id].
 * Boxes come from the label file matching `path`, are shuffled, remapped
 * through the crop/flip augmentation (dx, dy, sx, sy, flip), and then
 * validated; invalid boxes are skipped (counted in `sub` so the kept boxes
 * stay packed) and logged to bad_label.list.
 * Returns min_w_h: the smallest box side seen, in network pixels. */
int fill_truth_detection(const char *path, int num_boxes, int truth_size, float *truth, int classes, int flip, float dx, float dy, float sx, float sy,
    int net_w, int net_h)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    int i;
    box_label *boxes = read_boxes(labelpath, &count);
    int min_w_h = 0;
    // Smallest representable box side: one network pixel.
    float lowest_w = 1.F / net_w;
    float lowest_h = 1.F / net_h;
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if (count > num_boxes) count = num_boxes;
    float x, y, w, h;
    int id;
    int sub = 0;   // boxes dropped so far; keeps truth slots contiguous
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        int track_id = boxes[i].track_id;
        // not detect small objects
        //if ((w < 0.001F || h < 0.001F)) continue;
        // if truth (box for object) is smaller than 1x1 pix
        char buff[256];
        // SECURITY(review): labelpath is interpolated into shell commands via
        // system() below, and sprintf writes it into a 256-byte buffer while
        // labelpath itself is 4096 bytes -- confirm dataset file names are
        // trusted and short, or replace with fprintf to bad_label.list.
        if (id >= classes) {
            printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
            system(buff);
            if (check_mistakes) getchar();
            ++sub;
            continue;
        }
        // Box smaller than one network pixel: silently skipped.
        if ((w < lowest_w || h < lowest_h)) {
            //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
            //system(buff);
            ++sub;
            continue;
        }
        // 999999 is the "invalid box" sentinel written by correct_boxes().
        if (x == 999999 || y == 999999) {
            printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (x <= 0 || x > 1 || y <= 0 || y > 1) {
            printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        // Oversized boxes are clamped (not dropped), with a warning.
        if (w > 1) {
            printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
            system(buff);
            w = 1;
            if (check_mistakes) getchar();
        }
        if (h > 1) {
            printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
            system(buff);
            h = 1;
            if (check_mistakes) getchar();
        }
        // Nudge exact-zero centers so they aren't mistaken for empty slots.
        if (x == 0) x += lowest_w;
        if (y == 0) y += lowest_h;
        truth[(i-sub)*truth_size +0] = x;
        truth[(i-sub)*truth_size +1] = y;
        truth[(i-sub)*truth_size +2] = w;
        truth[(i-sub)*truth_size +3] = h;
        truth[(i-sub)*truth_size +4] = id;
        truth[(i-sub)*truth_size +5] = track_id;
        //float val = track_id;
        //printf(" i = %d, sub = %d, truth_size = %d, track_id = %d, %f, %f\n", i, sub, truth_size, track_id, truth[(i - sub)*truth_size + 5], val);
        // Track the smallest box side in network pixels.
        if (min_w_h == 0) min_w_h = w*net_w;
        if (min_w_h > w*net_w) min_w_h = w*net_w;
        if (min_w_h > h*net_h) min_w_h = h*net_h;
    }
    free(boxes);
    return min_w_h;
}
/* Print the n-character prediction: each NUMCHARS-wide slice of `pred`
 * is decoded as the alphanumeric symbol with the highest score. */
void print_letters(float *pred, int n)
{
    for (int pos = 0; pos < n; ++pos) {
        int best = max_index(pred + pos * NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(best));
    }
    printf("\n");
}
/* One-hot encode a captcha label taken from the basename of `path` (the
 * characters before the extension dot) into truth: one NUMCHARS-wide slot
 * per character, padded with the last class for missing positions.
 * Bug fix: strrchr() returns NULL when the path has no '/', and the
 * original unconditionally incremented it (undefined behavior); the whole
 * path is now used as the basename in that case.  strlen() is also hoisted
 * out of the loop condition. */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    begin = begin ? begin + 1 : path;   /* bug fix: handle paths without '/' */
    size_t len = strlen(begin);
    int i;
    for (i = 0; i < (int)len && i < n && begin[i] != '.'; ++i) {
        int index = alphanum_to_int(begin[i]);
        if (index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS + index] = 1;
    }
    /* Remaining positions get the last class (the "blank" slot). */
    for (; i < n; ++i) {
        truth[i*NUMCHARS + NUMCHARS - 1] = 1;
    }
}
/* Load n captcha images plus their k-character one-hot labels (NUMCHARS
 * classes per character).  If m > 0, paths are sampled at random from the
 * m available ones. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    for (int i = 0; i < n; ++i) {
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if (m) free(paths);
    return d;
}
/* Autoencoder variant of captcha loading: the target y aliases the image
 * matrix X itself.  NOTE(review): cols is forced to 17100 -- presumably a
 * fixed input size for a specific network; confirm before reuse. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;   /* shallow alias: input doubles as target */
    if (m) free(paths);
    return d;
}
/* One-hot encode `path` against `labels`: truth[i] = 1 iff labels[i] is a
 * substring of path.  If the number of matching labels is not exactly 1,
 * a warning is printed along with every matching label. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    memset(truth, 0, k * sizeof(float));
    int matches = 0;
    for (int i = 0; i < k; ++i) {
        if (strstr(path, labels[i])) {
            truth[i] = 1;
            ++matches;
        }
    }
    if (matches != 1) {
        printf("Too many or too few labels: %d, %s\n", matches, path);
        matches = 0;
        for (int i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", matches, labels[i]);
                matches++;
            }
        }
    }
}
/* Label-smoothed variant of fill_truth: matching labels get
 * 1 - label_smooth_eps, non-matching ones get label_smooth_eps / (k - 1).
 * If the number of matching labels is not exactly 1, a warning is printed
 * along with every matching label. */
void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps)
{
    memset(truth, 0, k * sizeof(float));
    int matches = 0;
    for (int i = 0; i < k; ++i) {
        if (strstr(path, labels[i])) {
            truth[i] = (1 - label_smooth_eps);
            ++matches;
        } else {
            truth[i] = label_smooth_eps / (k - 1);
        }
    }
    if (matches != 1) {
        printf("Too many or too few labels: %d, %s\n", matches, path);
        matches = 0;
        for (int i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", matches, labels[i]);
                matches++;
            }
        }
    }
}
/* Expand one-hot labels over a class hierarchy.  Pass 1 sets every
 * ancestor of each positive label to 1.  Pass 2 walks the sibling groups;
 * any group with no positive member is filled entirely with SECRET_NUM,
 * marking it "don't care" for downstream use. */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    // Pass 1: propagate each positive label up to the root.
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;   // start offset of the current sibling group
    // Pass 2: groups with no positive member become "don't care".
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}
/* Return the index of the largest element of arr[0..size-1]; returns 0
 * for an empty array.
 * Bug fix: the original initialized the running maximum to 0, so for
 * arrays whose elements are all negative it always returned index 0
 * regardless of where the true maximum was. */
int find_max(float *arr, int size) {
    if (size <= 0) return 0;
    int best = 0;
    float max = arr[0];
    for (int i = 1; i < size; ++i) {
        if (arr[i] > max) {
            max = arr[i];
            best = i;
        }
    }
    return best;
}
/* Build the n x k label matrix for a batch of image paths.
 * With `labels` (supervised), each row is a smoothed one-hot vector from
 * substring-matching the path against the label names, optionally expanded
 * through `hierarchy`.  Without labels (unsupervised), a pseudo class id is
 * derived from the path's pointer value -- stable within a run, arbitrary
 * across runs.  With `contrastive`, rows 2k and 2k+1 use the same image. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps, int contrastive)
{
    matrix y = make_matrix(n, k);
    int i;
    if (labels) {
        // supervised learning
        for (i = 0; i < n; ++i) {
            const int img_index = (contrastive) ? (i / 2) : i;
            fill_truth_smooth(paths[img_index], labels, k, y.vals[i], label_smooth_eps);
            //printf(" n = %d, i = %d, img_index = %d, class_id = %d \n", n, i, img_index, find_max(y.vals[i], k));
            if (hierarchy) {
                fill_hierarchy(y.vals[i], k, hierarchy);
            }
        }
    } else {
        // unsupervised learning: class id derived from the path pointer value
        for (i = 0; i < n; ++i) {
            const int img_index = (contrastive) ? (i / 2) : i;
            const uintptr_t path_p = (uintptr_t)paths[img_index];// abs(random_gen());
            const int class_id = path_p % k;
            int l;
            for (l = 0; l < k; ++l) y.vals[i][l] = 0;
            y.vals[i][class_id] = 1;
        }
    }
    return y;
}
/* Build an n x k multi-hot tag matrix: for each image path, the matching
 * tag file (imgs -> labels, _iconl.jpeg -> .txt, falling back to labels2)
 * is read as a list of tag indices; indices < k set that column to 1.
 * Images with no tag file leave an all-zero row.  Prints how many tag
 * files were found. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int loaded = 0;
    for (int i = 0; i < n; ++i) {
        char labelpath[4096];
        find_replace(paths[i], "imgs", "labels", labelpath);
        find_replace(labelpath, "_iconl.jpeg", ".txt", labelpath);
        FILE *fp = fopen(labelpath, "r");
        if (!fp) {
            /* Fall back to the alternate label directory. */
            find_replace(labelpath, "labels", "labels2", labelpath);
            fp = fopen(labelpath, "r");
            if (!fp) continue;
        }
        ++loaded;
        int tag;
        while (fscanf(fp, "%d", &tag) == 1) {
            if (tag < k) {
                y.vals[i][tag] = 1;
            }
        }
        fclose(fp);
    }
    printf("%d/%d\n", loaded, n);
    return y;
}
/* Read label names, one per line, from `filename` into a caller-owned
 * array of strings; if `size` is non-NULL it receives the line count. */
char **get_labels_custom(char *filename, int *size)
{
    list *lines = get_paths(filename);
    if (size) *size = lines->size;
    char **names = (char **)list_to_array(lines);
    free_list(lines);
    return names;
}
/* Read label names without reporting the count. */
char **get_labels(char *filename)
{
    return get_labels_custom(filename, NULL);
}
/* Release a data record.  Shallow copies free only the row-pointer
 * arrays (the row data is owned elsewhere); deep copies free the full
 * matrices. */
void free_data(data d)
{
    if (d.shallow) {
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/* Load n randomly-augmented images together with their region-grid truth
 * (see fill_truth_region): X holds one flattened h*w*3 image per row, y
 * holds size*size*(5+classes) truth values per row.  Augmentation is a
 * random jittered crop, optional horizontal flip, and HSV distortion;
 * the same crop/flip parameters are used to remap the box labels. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        int oh = orig.h;
        int ow = orig.w;
        // Jitter each crop edge by up to +/- jitter of the image size.
        int dw = (ow*jitter);
        int dh = (oh*jitter);
        int pleft = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop = rand_uniform(-dh, dh);
        int pbot = rand_uniform(-dh, dh);
        int swidth = ow - pleft - pright;
        int sheight = oh - ptop - pbot;
        // Scale/shift of the crop relative to the original, used below to
        // remap the box labels into the cropped frame.
        float sx = (float)swidth / ow;
        float sy = (float)sheight / oh;
        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;
        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}
/* Load n image pairs for comparison training: each X row is two RGB images
 * concatenated (h*w*6 floats); each y row holds, per class, the best IOU
 * seen for each image of the pair, collapsed to {1,0}, {0,1}, or
 * (SECRET_NUM, SECRET_NUM) when neither side clearly wins.
 * Bug fix: the label-file fopen results were unchecked, so a missing
 * label file crashed in fscanf(NULL); file_error() now reports it. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;   /* two RGB images concatenated per row */
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2], w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);
        d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));

        int id;
        float iou;
        char imlabel1[4096];
        char imlabel2[4096];
        /* Per-class best IOU for the first image -> even columns. */
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if (!fp1) file_error(imlabel1);   /* bug fix: was unchecked */
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }
        fclose(fp1);

        /* Per-class best IOU for the second image -> odd columns. */
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if (!fp2) file_error(imlabel2);   /* bug fix: was unchecked */
        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }
        fclose(fp2);

        /* Collapse the IOU pair per class into a win/lose/ambiguous label. */
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/* Load ONE randomly-chosen image with a jittered crop and optional flip,
 * plus its SWAG truth (30 slots of [x, y, w, h, one-hot class]; see
 * fill_truth_swag).  Note the crop is resized back to the image's own
 * (w, h), so the output keeps the original dimensions. */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = random_gen()%n;
    char *random_path = paths[index];
    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    d.X.rows = 1;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    int k = (4+classes)*30;   // 30 box slots, each (4+classes) wide
    d.y = make_matrix(1, k);
    // Jitter each crop edge by up to +/- jitter of the image size.
    int dw = w*jitter;
    int dh = h*jitter;
    int pleft = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop = rand_uniform(-dh, dh);
    int pbot = rand_uniform(-dh, dh);
    int swidth = w - pleft - pright;
    int sheight = h - ptop - pbot;
    // Scale/shift of the crop, used to remap the box labels below.
    float sx = (float)swidth / w;
    float sy = (float)sheight / h;
    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;
    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;
    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
    free_image(orig);
    free_image(cropped);
    return d;
}
/* Append the boxes from old_truth after those already in new_truth (a slot
 * is "occupied" while its x field is nonzero), stopping when all `boxes`
 * slots are full or old_truth runs out of boxes.  Only the first five
 * fields of each slot (x, y, w, h, id) are copied.
 * NOTE(review): field [5] (track id) is not carried over even when
 * truth_size > 5 -- confirm this is intended. */
void blend_truth(float *new_truth, int boxes, int truth_size, float *old_truth)
{
    int used = 0;
    while (used < boxes && new_truth[used * truth_size]) {
        ++used;
    }
    for (int t = used; t < boxes; ++t) {
        const float *src = old_truth + (t - used) * truth_size;
        if (!src[0]) break;   /* no more boxes in old_truth */
        float *dst = new_truth + t * truth_size;
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
    }
}
/* Like blend_truth, but for 4-image mosaic augmentation: boxes from one of
 * the four source images (`i_mixup` selects the quadrant, `cut_x`/`cut_y`
 * the seam position, the *_shift values the per-quadrant offsets) are
 * shifted into the mosaic's coordinate frame, clipped either to their
 * quadrant (mosaic_bound != 0) or to the whole image, and appended after
 * the boxes already present in new_truth.  Boxes that end up degenerate,
 * outside the frame, or smaller than one network pixel are dropped. */
void blend_truth_mosaic(float *new_truth, int boxes, int truth_size, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup,
    int left_shift, int right_shift, int top_shift, int bot_shift,
    int net_w, int net_h, int mosaic_bound)
{
    // Smallest box side worth keeping: one network pixel.
    const float lowest_w = 1.F / net_w;
    const float lowest_h = 1.F / net_h;
    int count_new_truth = 0;
    int t;
    // Count boxes already occupying the front of new_truth.
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*truth_size];
        if (!x) break;
        count_new_truth++;
    }
    int new_t = count_new_truth;   // next output slot (only advanced for kept boxes)
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + new_t*truth_size;
        new_truth_ptr[0] = 0;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*truth_size;
        float x = old_truth_ptr[0];
        if (!x) break;   // no more boxes in old_truth
        float xb = old_truth_ptr[0];
        float yb = old_truth_ptr[1];
        float wb = old_truth_ptr[2];
        float hb = old_truth_ptr[3];
        // shift 4 images
        if (i_mixup == 0) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 1) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 2) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }
        if (i_mixup == 3) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }
        // Box edges in mosaic pixel coordinates.
        int left = (xb - wb / 2)*w;
        int right = (xb + wb / 2)*w;
        int top = (yb - hb / 2)*h;
        int bot = (yb + hb / 2)*h;
        if(mosaic_bound)
        {
            // fix out of Mosaic-bound: clip to this image's quadrant.
            float left_bound = 0, right_bound = 0, top_bound = 0, bot_bound = 0;
            if (i_mixup == 0) {
                left_bound = 0;
                right_bound = cut_x;
                top_bound = 0;
                bot_bound = cut_y;
            }
            if (i_mixup == 1) {
                left_bound = cut_x;
                right_bound = w;
                top_bound = 0;
                bot_bound = cut_y;
            }
            if (i_mixup == 2) {
                left_bound = 0;
                right_bound = cut_x;
                top_bound = cut_y;
                bot_bound = h;
            }
            if (i_mixup == 3) {
                left_bound = cut_x;
                right_bound = w;
                top_bound = cut_y;
                bot_bound = h;
            }
            if (left < left_bound) {
                //printf(" i_mixup = %d, left = %d, left_bound = %f \n", i_mixup, left, left_bound);
                left = left_bound;
            }
            if (right > right_bound) {
                //printf(" i_mixup = %d, right = %d, right_bound = %f \n", i_mixup, right, right_bound);
                right = right_bound;
            }
            if (top < top_bound) top = top_bound;
            if (bot > bot_bound) bot = bot_bound;
            // Rebuild center/size from the clipped edges.
            xb = ((float)(right + left) / 2) / w;
            wb = ((float)(right - left)) / w;
            yb = ((float)(bot + top) / 2) / h;
            hb = ((float)(bot - top)) / h;
        }
        else
        {
            // fix out of bound: shrink the box at the image edges.
            if (left < 0) {
                float diff = (float)left / w;
                xb = xb - diff / 2;
                wb = wb + diff;
            }
            if (right > w) {
                float diff = (float)(right - w) / w;
                xb = xb - diff / 2;
                wb = wb - diff;
            }
            if (top < 0) {
                float diff = (float)top / h;
                yb = yb - diff / 2;
                hb = hb + diff;
            }
            if (bot > h) {
                float diff = (float)(bot - h) / h;
                yb = yb - diff / 2;
                hb = hb - diff;
            }
            left = (xb - wb / 2)*w;
            right = (xb + wb / 2)*w;
            top = (yb - hb / 2)*h;
            bot = (yb + hb / 2)*h;
        }
        // leave only within the image
        if(left >= 0 && right <= w && top >= 0 && bot <= h &&
            wb > 0 && wb < 1 && hb > 0 && hb < 1 &&
            xb > 0 && xb < 1 && yb > 0 && yb < 1 &&
            wb > lowest_w && hb > lowest_h)
        {
            new_truth_ptr[0] = xb;
            new_truth_ptr[1] = yb;
            new_truth_ptr[2] = wb;
            new_truth_ptr[3] = hb;
            new_truth_ptr[4] = old_truth_ptr[4];
            new_t++;
        }
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}
#ifdef OPENCV
#include "http_stream.h"
/* OpenCV build: load and augment one detection mini-batch of n samples.
 * Returns a deep (shallow=0) `data` with d.X = n rows of h*w*c floats and
 * d.y = n rows of truth_size*boxes floats. Supports jitter/resize/HSV/flip
 * augmentation, MixUp (use_mixup==1) and 4-tile Mosaic (use_mixup==3).
 * Fix vs. original: cut_x/cut_y were allocated with calloc() and never
 * freed (leak on every mosaic batch); now allocated with xcalloc() for
 * consistency with the rest of the file and freed before returning. */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int truth_size, int classes, int use_flip, int use_gaussian_noise, int use_blur, int use_mixup,
    float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int mosaic_bound, int contrastive, int contrastive_jit_flip, int contrastive_color, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;  // default to 3 channels when caller passes 0

    if (use_mixup == 2 || use_mixup == 4) {
        printf("\n cutmix=1 - isn't supported for Detector (use cutmix=1 only for Classifier) \n");
        if (check_mistakes) getchar();
        if (use_mixup == 2) use_mixup = 0;
        else use_mixup = 3;     // 4 -> fall back to mosaic only
    }
    if (use_mixup == 3 && letter_box) {
        //printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n");
        //if (check_mistakes) getchar();
        //exit(0);
    }
    if (random_gen() % 2 == 0) use_mixup = 0;   // apply mixup/mosaic on ~half of the batches
    int i;

    // Per-sample mosaic cut points (only used when use_mixup == 3).
    int *cut_x = NULL, *cut_y = NULL;
    if (use_mixup == 3) {
        cut_x = (int*)xcalloc(n, sizeof(int));
        cut_y = (int*)xcalloc(n, sizeof(int));
        const float min_offset = 0.2; // 20%
        for (i = 0; i < n; ++i) {
            cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset));
            cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset));
        }
    }

    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;

    // Augmentation parameters, reused across a tracked sequence.
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float resize_r1 = 0, resize_r2 = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
    int augmentation_calculated = 0, gaussian_noise = 0;

    d.y = make_matrix(n, truth_size*boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;   // recalculate augmentation for the 2nd sequence if(track==1)

        char **random_paths;
        if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive);
        else random_paths = get_random_paths_custom(paths, n, m, contrastive);

        for (i = 0; i < n; ++i) {
            float *truth = (float*)xcalloc(truth_size * boxes, sizeof(float));
            const char *filename = random_paths[i];

            int flag = (c >= 3);
            mat_cv *src;
            src = load_image_mat_cv(filename, flag);
            if (src == NULL) {
                // Unreadable image: skip this slot (its row stays zeroed).
                printf("\n Error in load_data_detection() - OpenCV \n");
                fflush(stdout);
                if (check_mistakes) {
                    getchar();
                }
                continue;
            }

            int oh = get_height_mat(src);
            int ow = get_width_mat(src);

            int dw = (ow*jitter);
            int dh = (oh*jitter);

            // Normalize `resize` into a down-factor (<=1) and up-factor (>=1).
            float resize_down = resize, resize_up = resize;
            if (resize_down > 1.0) resize_down = 1 / resize_down;
            int min_rdw = ow*(1 - (1 / resize_down)) / 2;   // < 0
            int min_rdh = oh*(1 - (1 / resize_down)) / 2;   // < 0

            if (resize_up < 1.0) resize_up = 1 / resize_up;
            int max_rdw = ow*(1 - (1 / resize_up)) / 2;     // > 0
            int max_rdh = oh*(1 - (1 / resize_up)) / 2;     // > 0
            //printf(" down = %f, up = %f \n", (1 - (1 / resize_down)) / 2, (1 - (1 / resize_up)) / 2);

            // For tracked sequences, draw augmentation once and reuse it.
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                resize_r1 = random_float();
                resize_r2 = random_float();

                if (!contrastive || contrastive_jit_flip || i % 2 == 0)
                {
                    r1 = random_float();
                    r2 = random_float();
                    r3 = random_float();
                    r4 = random_float();

                    flip = use_flip ? random_gen() % 2 : 0;
                }

                r_scale = random_float();

                if (!contrastive || contrastive_color || i % 2 == 0)
                {
                    dhue = rand_uniform_strong(-hue, hue);
                    dsat = rand_scale(saturation);
                    dexp = rand_scale(exposure);
                }

                if (use_blur) {
                    int tmp_blur = rand_int(0, 2);  // 0 - disable, 1 - blur background, 2 - blur the whole image
                    if (tmp_blur == 0) blur = 0;
                    else if (tmp_blur == 1) blur = 1;
                    else blur = use_blur;
                }

                if (use_gaussian_noise && rand_int(0, 1) == 1) gaussian_noise = use_gaussian_noise;
                else gaussian_noise = 0;
            }

            // Jitter crop offsets for each side of the source image.
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);

            if (resize < 1) {
                // downsize only
                pleft += rand_precalc_random(min_rdw, 0, resize_r1);
                pright += rand_precalc_random(min_rdw, 0, resize_r2);
                ptop += rand_precalc_random(min_rdh, 0, resize_r1);
                pbot += rand_precalc_random(min_rdh, 0, resize_r2);
            }
            else {
                pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1);
                pright += rand_precalc_random(min_rdw, max_rdw, resize_r2);
                ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1);
                pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2);
            }
            //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);

            //float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
            //printf(" letter_box = %d \n", letter_box);

            if (letter_box)
            {
                // Pad the crop so the network aspect ratio is preserved.
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh)/2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow)/2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
                //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);
            }

            // move each 2nd image to the corner - so that most of it was visible
            if (use_mixup == 3 && random_gen() % 2 == 0) {
                if (flip) {
                    if (i_mixup == 0) pleft += pright, pright = 0, pbot += ptop, ptop = 0;
                    if (i_mixup == 1) pright += pleft, pleft = 0, pbot += ptop, ptop = 0;
                    if (i_mixup == 2) pleft += pright, pright = 0, ptop += pbot, pbot = 0;
                    if (i_mixup == 3) pright += pleft, pleft = 0, ptop += pbot, pbot = 0;
                }
                else {
                    if (i_mixup == 0) pright += pleft, pleft = 0, pbot += ptop, ptop = 0;
                    if (i_mixup == 1) pleft += pright, pright = 0, pbot += ptop, ptop = 0;
                    if (i_mixup == 2) pright += pleft, pleft = 0, ptop += pbot, pbot = 0;
                    if (i_mixup == 3) pleft += pright, pright = 0, ptop += pbot, pbot = 0;
                }
            }

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            // Box coordinates are remapped into the cropped frame via dx/dy and 1/sx, 1/sy.
            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            int min_w_h = fill_truth_detection(filename, boxes, truth_size, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
            //for (int z = 0; z < boxes; ++z) if(truth[z*truth_size] > 0) printf(" track_id = %f \n", truth[z*truth_size + 5]);
            //printf(" truth_size = %d \n", truth_size);

            if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8;   // disable blur if one of the objects is too small

            image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp,
                gaussian_noise, blur, boxes, truth_size, truth);

            if (use_mixup == 0) {
                d.X.vals[i] = ai.data;
                memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float));
            }
            else if (use_mixup == 1) {
                if (i_mixup == 0) {
                    // first pass: just store the image and its truth
                    d.X.vals[i] = ai.data;
                    memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float));
                }
                else if (i_mixup == 1) {
                    // second pass: 50/50 blend into the stored image
                    image old_img = make_empty_image(w, h, c);
                    old_img.data = d.X.vals[i];
                    //show_image(ai, "new");
                    //show_image(old_img, "old");
                    //wait_until_press_key_cv();
                    blend_images_cv(ai, 0.5, old_img, 0.5);
                    blend_truth(d.y.vals[i], boxes, truth_size, truth);
                    free_image(old_img);
                    d.X.vals[i] = ai.data;
                }
            }
            else if (use_mixup == 3) {
                // Mosaic: copy this pass's quadrant into the 4-tile canvas.
                if (i_mixup == 0) {
                    image tmp_img = make_image(w, h, c);
                    d.X.vals[i] = tmp_img.data;
                }

                if (flip) {
                    int tmp = pleft;
                    pleft = pright;
                    pright = tmp;
                }

                const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow)));
                const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh)));
                const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow)));
                const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh)));

                int k, x, y;
                for (k = 0; k < c; ++k) {
                    for (y = 0; y < h; ++y) {
                        int j = y*w + k*w*h;
                        if (i_mixup == 0 && y < cut_y[i]) {
                            int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
                            memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float));
                        }
                        if (i_mixup == 1 && y < cut_y[i]) {
                            int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
                            memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float));
                        }
                        if (i_mixup == 2 && y >= cut_y[i]) {
                            int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h;
                            memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float));
                        }
                        if (i_mixup == 3 && y >= cut_y[i]) {
                            int j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h;
                            memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float));
                        }
                    }
                }

                blend_truth_mosaic(d.y.vals[i], boxes, truth_size, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift, w, h, mosaic_bound);

                free_image(ai);
                ai.data = d.X.vals[i];
            }

            if (show_imgs && i_mixup == use_mixup)   // delete i_mixup
            {
                // Debug mode: dump the augmented image with its boxes drawn.
                image tmp_ai = copy_image(ai);
                char buff[1000];
                //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*truth_size, 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*ai.w;
                    int right = (b.x + b.w / 2.)*ai.w;
                    int top = (b.y - b.h / 2.)*ai.h;
                    int bot = (b.y + b.h / 2.)*ai.h;
                    draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(tmp_ai, buff);
                if (show_imgs == 1) {
                    //char buff_src[1000];
                    //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                    //show_image_mat(src, buff_src);
                    show_image(tmp_ai, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
                free_image(tmp_ai);
            }

            release_mat(&src);
            free(truth);
        }
        if (random_paths) free(random_paths);
    }

    // Fix: cut_x/cut_y were leaked in the original. free(NULL) is a no-op.
    free(cut_x);
    free(cut_y);
    return d;
}
#else // OPENCV
/* Blend two same-sized images element-wise, in place:
 * new_img = alpha*new_img + beta*old_img. */
void blend_images(image new_img, float alpha, image old_img, float beta)
{
    const int total = new_img.w * new_img.h * new_img.c;
    int idx;
    #pragma omp parallel for
    for (idx = 0; idx < total; ++idx) {
        new_img.data[idx] = alpha * new_img.data[idx] + beta * old_img.data[idx];
    }
}
/* Non-OpenCV build: load and augment one detection mini-batch of n samples.
 * Returns a deep (shallow=0) `data` with d.X = n rows of h*w*c floats and
 * d.y = n rows of truth_size*boxes floats.
 * Only plain MixUp is supported here; CutMix and Mosaic require OpenCV. */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int truth_size, int classes, int use_flip, int gaussian_noise, int use_blur, int use_mixup,
    float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int mosaic_bound, int contrastive, int contrastive_jit_flip, int contrastive_color, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;   // default to 3 channels when caller passes 0
    char **random_paths;
    char **mixup_random_paths = NULL;
    if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive);
    else random_paths = get_random_paths_custom(paths, n, m, contrastive);

    //assert(use_mixup < 2);
    if (use_mixup == 2) {
        printf("\n cutmix=1 - isn't supported for Detector \n");
        exit(0);
    }
    if (use_mixup == 3 || use_mixup == 4) {
        printf("\n mosaic=1 - compile Darknet with OpenCV for using mosaic=1 \n");
        exit(0);
    }
    // Apply MixUp to roughly half of the batches when enabled.
    int mixup = use_mixup ? random_gen() % 2 : 0;
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        // Second, independent set of image paths to blend with the first.
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }

    int i;
    data d = { 0 };
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;

    // Augmentation parameters, reused across a tracked sequence.
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale;
    float resize_r1 = 0, resize_r2 = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0;
    int augmentation_calculated = 0;

    d.y = make_matrix(n, truth_size * boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;   // re-draw augmentation for the second pass
        for (i = 0; i < n; ++i) {
            float *truth = (float*)xcalloc(truth_size * boxes, sizeof(float));
            char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];

            image orig = load_image(filename, 0, 0, c);

            int oh = orig.h;
            int ow = orig.w;

            int dw = (ow*jitter);
            int dh = (oh*jitter);

            // Normalize `resize` into a down-factor (<=1) and up-factor (>=1).
            float resize_down = resize, resize_up = resize;
            if (resize_down > 1.0) resize_down = 1 / resize_down;
            int min_rdw = ow*(1 - (1 / resize_down)) / 2;
            int min_rdh = oh*(1 - (1 / resize_down)) / 2;

            if (resize_up < 1.0) resize_up = 1 / resize_up;
            int max_rdw = ow*(1 - (1 / resize_up)) / 2;
            int max_rdh = oh*(1 - (1 / resize_up)) / 2;

            // For tracked sequences, draw augmentation once and reuse it.
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                resize_r1 = random_float();
                resize_r2 = random_float();

                if (!contrastive || contrastive_jit_flip || i % 2 == 0)
                {
                    r1 = random_float();
                    r2 = random_float();
                    r3 = random_float();
                    r4 = random_float();

                    flip = use_flip ? random_gen() % 2 : 0;
                }

                r_scale = random_float();   // drawn for RNG-stream parity; not used below

                if (!contrastive || contrastive_color || i % 2 == 0)
                {
                    dhue = rand_uniform_strong(-hue, hue);
                    dsat = rand_scale(saturation);
                    dexp = rand_scale(exposure);
                }
            }

            // Jitter crop offsets for each side of the source image.
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);

            if (resize < 1) {
                // downsize only
                pleft += rand_precalc_random(min_rdw, 0, resize_r1);
                pright += rand_precalc_random(min_rdw, 0, resize_r2);
                ptop += rand_precalc_random(min_rdh, 0, resize_r1);
                pbot += rand_precalc_random(min_rdh, 0, resize_r2);
            }
            else {
                pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1);
                pright += rand_precalc_random(min_rdw, max_rdw, resize_r2);
                ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1);
                pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2);
            }

            if (letter_box)
            {
                // Pad the crop so the network aspect ratio is preserved.
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh) / 2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow) / 2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

            // Box coordinates are remapped into the crop via dx/dy and 1/sx, 1/sy.
            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            image sized = resize_image(cropped, w, h);
            if (flip) flip_image(sized);
            distort_image(sized, dhue, dsat, dexp);
            //random_distort_image(sized, hue, saturation, exposure);
            fill_truth_detection(filename, boxes, truth_size, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);

            if (i_mixup) {
                // Second pass: 50/50 blend with the image stored in pass 0.
                image old_img = sized;
                old_img.data = d.X.vals[i];
                //show_image(sized, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images(sized, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, truth_size, d.y.vals[i]);
                free_image(old_img);
            }

            d.X.vals[i] = sized.data;
            memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float));

            if (show_imgs)// && i_mixup)
            {
                // Debug mode: save the augmented image with its boxes drawn.
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());

                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*truth_size, 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*sized.w;
                    int right = (b.x + b.w / 2.)*sized.w;
                    int top = (b.y - b.h / 2.)*sized.h;
                    int bot = (b.y + b.h / 2.)*sized.h;
                    draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(sized, buff);
                if (show_imgs == 1) {
                    show_image(sized, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
                //getchar();
            }

            free_image(orig);
            free_image(cropped);
            free(truth);
        }
    }
    free(random_paths);
    if (mixup_random_paths) free(mixup_random_paths);
    return d;
}
#endif // OPENCV
/* Worker entry point: takes ownership of a heap-allocated load_args,
 * dispatches to the loader matching a.type, and frees the argument. */
void *load_thread(void *ptr)
{
    //srand(time(0));
    //printf("Loading data: %d\n", random_gen());
    load_args a = *(struct load_args*)ptr;
    // Zero means "unset" for these augmentation factors; 1 is the identity.
    if (a.exposure == 0) a.exposure = 1;
    if (a.saturation == 0) a.saturation = 1;
    if (a.aspect == 0) a.aspect = 1;

    switch (a.type) {
    case OLD_CLASSIFICATION_DATA:
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
        break;
    case CLASSIFICATION_DATA:
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv, a.contrastive);
        break;
    case SUPER_DATA:
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
        break;
    case WRITING_DATA:
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
        break;
    case REGION_DATA:
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
        break;
    case DETECTION_DATA:
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.truth_size, a.classes, a.flip, a.gaussian_noise, a.blur, a.mixup, a.jitter, a.resize,
            a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.mosaic_bound, a.contrastive, a.contrastive_jit_flip, a.contrastive_color, a.show_imgs);
        break;
    case SWAG_DATA:
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
        break;
    case COMPARE_DATA:
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
        break;
    case IMAGE_DATA:
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
        break;
    case LETTERBOX_DATA:
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
        break;
    case TAG_DATA:
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
        break;
    default:
        // Unknown type: nothing to load (matches the original if/else chain).
        break;
    }

    free(ptr);
    return 0;
}
/* Spawn a single loader thread. The thread receives a heap copy of
 * `args` and frees it itself (see load_thread). Caller joins the thread. */
pthread_t load_data_in_thread(load_args args)
{
    struct load_args *copy = (load_args*)xcalloc(1, sizeof(struct load_args));
    *copy = args;
    pthread_t tid;
    if (pthread_create(&tid, 0, load_thread, copy) != 0) {
        error("Thread creation failed", DARKNET_LOC);
    }
    return tid;
}
// --- Permanent data-loader thread pool state (see load_threads / run_thread_loop) ---
static const int thread_wait_ms = 5;        // polling interval for the flag-based handshakes below
static volatile int flag_exit;              // set to 1 to request all worker threads to exit
static volatile int * run_load_data = NULL; // per-worker "work available" flags (1 = run, 0 = idle/done)
static load_args * args_swap = NULL;        // per-worker argument slots; hand-off guarded by mtx_load_data
static pthread_t* threads = NULL;           // worker thread handles; lazily created on first load_threads() call
pthread_mutex_t mtx_load_data = PTHREAD_MUTEX_INITIALIZER; // protects reads/writes of args_swap
/* Body of one permanent loader worker. `ptr` is a heap-allocated worker
 * index (freed here on every exit path). The worker polls its
 * run_load_data[i] flag, copies its args out of args_swap[i] under the
 * mutex, runs one load, then clears the flag to signal completion. */
void *run_thread_loop(void *ptr)
{
    const int i = *(int *)ptr;   // this worker's slot index

    while (!custom_atomic_load_int(&flag_exit)) {
        // Sleep-poll until load_threads() raises our work flag (or exit is requested).
        while (!custom_atomic_load_int(&run_load_data[i])) {
            if (custom_atomic_load_int(&flag_exit)) {
                free(ptr);
                return 0;
            }
            this_thread_sleep_for(thread_wait_ms);
        }

        pthread_mutex_lock(&mtx_load_data);
        // Take a private heap copy of the args; load_thread() frees it.
        load_args *args_local = (load_args *)xcalloc(1, sizeof(load_args));
        *args_local = args_swap[i];
        pthread_mutex_unlock(&mtx_load_data);

        load_thread(args_local);

        custom_atomic_store_int(&run_load_data[i], 0);   // signal completion to load_threads()
    }
    free(ptr);
    return 0;
}
/* Fan one load request out over args.threads permanent workers, then
 * concatenate their partial results into *args.d. Takes ownership of
 * `ptr` (frees it). The worker pool and its control arrays are created
 * lazily on the first call and reused afterwards. */
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;   // total sample count, split across workers below
    free(ptr);
    data* buffers = (data*)xcalloc(args.threads, sizeof(data));

    // First call: create the permanent worker pool and its control arrays.
    if (!threads) {
        threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
        run_load_data = (volatile int *)xcalloc(args.threads, sizeof(int));
        args_swap = (load_args *)xcalloc(args.threads, sizeof(load_args));
        fprintf(stderr, " Create %d permanent cpu-threads \n", args.threads);

        for (i = 0; i < args.threads; ++i) {
            int* ptr = (int*)xcalloc(1, sizeof(int));   // worker index; freed by the worker itself
            *ptr = i;
            if (pthread_create(&threads[i], 0, run_thread_loop, ptr)) error("Thread creation failed", DARKNET_LOC);
        }
    }

    // Hand each worker its slice and raise its work flag.
    for (i = 0; i < args.threads; ++i) {
        args.d = buffers + i;
        args.n = (i + 1) * total / args.threads - i * total / args.threads;   // balanced split summing to total

        pthread_mutex_lock(&mtx_load_data);
        args_swap[i] = args;
        pthread_mutex_unlock(&mtx_load_data);

        custom_atomic_store_int(&run_load_data[i], 1); // run thread
    }

    // Poll-based join: wait until every worker clears its flag.
    for (i = 0; i < args.threads; ++i) {
        while (custom_atomic_load_int(&run_load_data[i])) this_thread_sleep_for(thread_wait_ms); // join
    }

    /*
    pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    */

    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    // The concatenated result owns the rows now; free only the buffer shells.
    for (i = 0; i < args.threads; ++i) {
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    //free(threads);
    return 0;
}
/* Shut down the permanent worker pool created by load_threads() and free
 * its control arrays, allowing a later call to recreate the pool.
 * Does not free `ptr`.
 * NOTE(review): joins args.threads threads — assumes the caller passes the
 * same thread count that was used when the pool was created; TODO confirm. */
void free_load_threads(void *ptr)
{
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    int i;
    if (threads) {
        custom_atomic_store_int(&flag_exit, 1);   // ask every worker to exit its loop
        for (i = 0; i < args.threads; ++i) {
            pthread_join(threads[i], 0);
        }
        free((void*)run_load_data);
        free(args_swap);
        free(threads);
        threads = NULL;
        custom_atomic_store_int(&flag_exit, 0);   // reset so the pool can be recreated
    }
}
/* Spawn the fan-out thread (load_threads). It receives a heap copy of
 * `args` and frees it itself. Caller joins the returned thread. */
pthread_t load_data(load_args args)
{
    struct load_args *copy = (load_args*)xcalloc(1, sizeof(struct load_args));
    *copy = args;
    pthread_t tid;
    if (pthread_create(&tid, 0, load_threads, copy) != 0) {
        error("Thread creation failed", DARKNET_LOC);
    }
    return tid;
}
/* Load n input images plus their grayscale label images. Ground-truth
 * images are located by replacing ".png" with "-label.png" in each path. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    char **selected = m ? get_random_paths(paths, n, m) : paths;
    char **label_paths = find_replace_paths(selected, n, ".png", "-label.png");

    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(selected, n, w, h);
    d.y = load_image_paths_gray(label_paths, n, out_w, out_h);

    if (selected != paths) free(selected);
    for (int i = 0; i < n; ++i) free(label_paths[i]);
    free(label_paths);
    return d;
}
/* Load n classification samples (images + one-hot labels) with no augmentation.
 * When m > 0, n paths are sampled at random from the m available ones. */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    char **selected = m ? get_random_paths(paths, n, m) : paths;

    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(selected, n, w, h);
    d.y = load_labels_paths(selected, n, labels, k, 0, 0, 0);

    if (selected != paths) free(selected);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/* Build a super-resolution batch: d.y holds random high-res crops
 * (w*scale x h*scale), d.X holds the same crops downscaled to w x h.
 * Each crop is horizontally flipped with probability 1/2. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    char **selected = m ? get_random_paths(paths, n, m) : paths;

    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.cols = w*h*3;
    d.X.vals = (float**)xcalloc(n, sizeof(float*));
    d.y.rows = n;
    d.y.cols = w*scale * h*scale * 3;
    d.y.vals = (float**)xcalloc(n, sizeof(float*));

    for (int i = 0; i < n; ++i) {
        image full = load_image_color(selected[i], 0, 0);
        image label_crop = random_crop_image(full, w*scale, h*scale);
        if (random_gen() % 2) flip_image(label_crop);
        image input = resize_image(label_crop, w, h);

        d.X.vals[i] = input.data;       // low-res input
        d.y.vals[i] = label_crop.data;  // high-res target
        free_image(full);
    }

    if (selected != paths) free(selected);
    return d;
}
/* Load n classification samples with augmentation (flip/angle/aspect/HSV),
 * optionally applying MixUp (use_mixup==1), CutMix (2), Mosaic (3) or
 * alternating CutMix/Mosaic (4) to roughly half of the batches.
 * Returns a deep (shallow=0) `data`. */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle,
    float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv, int contrastive)
{
    char **paths_stored = paths;   // keep the full list for drawing extra mix sets
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy, label_smooth_eps, contrastive);

    // Mixing is applied to ~half of the batches when enabled.
    if (use_mixup && rand_int(0, 1)) {
        char **paths_mix = get_random_paths(paths_stored, n, m);
        data d2 = { 0 };
        d2.shallow = 0;
        d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive);
        d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps, contrastive);
        free(paths_mix);

        data d3 = { 0 };
        d3.shallow = 0;
        data d4 = { 0 };
        d4.shallow = 0;
        if (use_mixup >= 3) {
            // Mosaic needs two more independent image sets (4 tiles total).
            char **paths_mix3 = get_random_paths(paths_stored, n, m);
            d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive);
            d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps, contrastive);
            free(paths_mix3);

            char **paths_mix4 = get_random_paths(paths_stored, n, m);
            d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive);
            d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps, contrastive);
            free(paths_mix4);
        }

        // mix
        int i, j;
        for (i = 0; i < d2.X.rows; ++i) {
            int mixup = use_mixup;
            if (use_mixup == 4) mixup = rand_int(2, 3); // alternate CutMix and Mosaic

            // MixUp -----------------------------------
            if (mixup == 1) {
                // mix images: plain 50/50 average
                for (j = 0; j < d2.X.cols; ++j) {
                    d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f;
                }
                // mix labels
                for (j = 0; j < d2.y.cols; ++j) {
                    d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f;
                }
            }
            // CutMix -----------------------------------
            else if (mixup == 2) {
                const float min = 0.3; // 0.3*0.3 = 9%
                const float max = 0.8; // 0.8*0.8 = 64%
                const int cut_w = rand_int(w*min, w*max);
                const int cut_h = rand_int(h*min, h*max);
                const int cut_x = rand_int(0, w - cut_w - 1);
                const int cut_y = rand_int(0, h - cut_h - 1);
                const int left = cut_x;
                const int right = cut_x + cut_w;
                const int top = cut_y;
                const int bot = cut_y + cut_h;

                assert(cut_x >= 0 && cut_x <= w);
                assert(cut_y >= 0 && cut_y <= h);
                assert(cut_w >= 0 && cut_w <= w);
                assert(cut_h >= 0 && cut_h <= h);

                assert(right >= 0 && right <= w);
                assert(bot >= 0 && bot <= h);

                assert(top <= bot);
                assert(left <= right);

                // label weights are proportional to the pasted area
                const float alpha = (float)(cut_w*cut_h) / (float)(w*h);
                const float beta = 1 - alpha;

                int c, x, y;
                // paste the d2 rectangle into d's image
                for (c = 0; c < 3; ++c) {
                    for (y = top; y < bot; ++y) {
                        for (x = left; x < right; ++x) {
                            int j = x + y*w + c*w*h;
                            d.X.vals[i][j] = d2.X.vals[i][j];
                        }
                    }
                }

                //printf("\n alpha = %f, beta = %f \n", alpha, beta);
                // mix labels
                for (j = 0; j < d.y.cols; ++j) {
                    d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha;
                }
            }
            // Mosaic -----------------------------------
            else if (mixup == 3)
            {
                const float min_offset = 0.2; // 20%
                const int cut_x = rand_int(w*min_offset, w*(1 - min_offset));
                const int cut_y = rand_int(h*min_offset, h*(1 - min_offset));

                // per-quadrant area fractions, used to weight the labels
                float s1 = (float)(cut_x * cut_y) / (w*h);
                float s2 = (float)((w - cut_x) * cut_y) / (w*h);
                float s3 = (float)(cut_x * (h - cut_y)) / (w*h);
                float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h);

                int c, x, y;
                // compose the 4 quadrants from d, d2, d3, d4
                for (c = 0; c < 3; ++c) {
                    for (y = 0; y < h; ++y) {
                        for (x = 0; x < w; ++x) {
                            int j = x + y*w + c*w*h;
                            if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j];
                            if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j];
                            if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j];
                            if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j];
                        }
                    }
                }

                for (j = 0; j < d.y.cols; ++j) {
                    const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4)));
                    d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s;
                }
            }
        }

        free_data(d2);
        if (use_mixup >= 3) {
            free_data(d3);
            free_data(d4);
        }
    }

#ifdef OPENCV
    if (use_blur) {
        int i;
        for (i = 0; i < d.X.rows; ++i) {
            if (random_gen() % 4 == 0) {   // blur ~25% of the samples
                image im = make_empty_image(w, h, 3);
                im.data = d.X.vals[i];
                int ksize = use_blur;
                if (use_blur == 1) ksize = 15;   // use_blur==1 means "default kernel"
                image blurred = blur_image(im, ksize);
                free_image(im);   // frees the old row; the blurred copy replaces it
                d.X.vals[i] = blurred.data;
                //if (i == 0) {
                //    show_image(im, "Not blurred");
                //    show_image(blurred, "blurred");
                //    wait_until_press_key_cv();
                //}
            }
        }
    }
#endif // OPENCV

    if (show_imgs) {
        // Debug mode: dump each augmented image and print its active classes.
        int i, j;
        for (i = 0; i < d.X.rows; ++i) {
            image im = make_empty_image(w, h, 3);
            im.data = d.X.vals[i];
            char buff[1000];
            sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen());
            save_image(im, buff);

            char buff_string[1000];
            sprintf(buff_string, "\n Classes: ");
            for (j = 0; j < d.y.cols; ++j) {
                if (d.y.vals[i][j] > 0) {
                    char buff_tmp[100];
                    sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]);
                    strcat(buff_string, buff_tmp);
                }
            }
            printf("%s \n", buff_string);

            if (show_imgs == 1) {
                show_image(im, buff);
                wait_until_press_key_cv();
            }
        }
        printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
    }

    if (m) free(paths);
    return d;
}
/* Load n tag-classification samples: augmented images plus multi-label
 * tag vectors of size k. */
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **selected = m ? get_random_paths(paths, n, m) : paths;

    data d = {0};
    d.w = w;
    d.h = h;
    d.shallow = 0;
    d.X = load_image_augment_paths(selected, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0, 0);
    d.y = load_tags_paths(selected, n, k);

    if (selected != paths) free(selected);
    return d;
}
/* Stack m2's rows below m1's. The result is shallow: it shares the row
 * pointers of both inputs (only the pointer array is newly allocated). */
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix out;
    out.cols = m1.cols;
    out.rows = m1.rows + m2.rows;
    out.vals = (float**)xcalloc(out.rows, sizeof(float*));

    int dst = 0;
    for (int i = 0; i < m1.rows; ++i) out.vals[dst++] = m1.vals[i];
    for (int i = 0; i < m2.rows; ++i) out.vals[dst++] = m2.vals[i];
    return out;
}
/* Concatenate two datasets row-wise. The result is shallow (shares row
 * pointers with d1 and d2), so only one copy may be deep-freed. */
data concat_data(data d1, data d2)
{
    data merged = {0};
    merged.shallow = 1;
    merged.X = concat_matrix(d1.X, d2.X);
    merged.y = concat_matrix(d1.y, d2.y);
    return merged;
}
/* Fold n datasets into one by repeated shallow concatenation, freeing
 * each intermediate accumulator's pointer arrays along the way. */
data concat_datas(data *d, int n)
{
    data accum = {0};
    for (int i = 0; i < n; ++i) {
        data merged = concat_data(d[i], accum);
        free_data(accum);   // accum is shallow after the first iteration
        accum = merged;
    }
    return accum;
}
/* Load a CSV file as features, popping column `target` out and one-hot
 * encoding it into k classes as the labels. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;

    matrix X = csv_to_matrix(filename);
    float *raw_labels = pop_column(&X, target);

    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = one_hot_encode(raw_labels, X.rows, k);

    d.X = X;
    d.y = y;
    free(raw_labels);
    return d;
}
/* Load one CIFAR-10 binary batch file (10000 records of 1 label byte +
 * 3072 pixel bytes) into a dataset with one-hot labels, scaled to [0,1].
 * Fix: the original ignored fread's return value, so a truncated file
 * caused reads of uninitialized stack bytes (undefined behavior). Now a
 * short read stops loading and leaves the remaining rows zeroed. */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;

    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        if (fread(bytes, 1, 3073, fp) != 3073) {
            fprintf(stderr, "Truncated CIFAR-10 batch file: %s (record %ld)\n", filename, i);
            break;
        }
        int class_id = bytes[0];
        y.vals[i][class_id] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255);   // bytes -> [0,1]
    fclose(fp);
    return d;
}
/* Copy n uniformly random rows (with replacement) of d into the flat
 * output buffers X and y (row-major, one row per sample). */
void get_random_batch(data d, int n, float *X, float *y)
{
    for (int j = 0; j < n; ++j) {
        int row = random_gen() % d.X.rows;
        memcpy(X + j*d.X.cols, d.X.vals[row], d.X.cols*sizeof(float));
        memcpy(y + j*d.y.cols, d.y.vals[row], d.y.cols*sizeof(float));
    }
}
/* Copy rows [offset, offset+n) of d sequentially into the flat output
 * buffers X and y (row-major, one row per sample). */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    for (int j = 0; j < n; ++j) {
        int row = offset + j;
        memcpy(X + j*d.X.cols, d.X.vals[row], d.X.cols*sizeof(float));
        memcpy(y + j*d.y.cols, d.y.vals[row], d.y.cols*sizeof(float));
    }
}
/* Label smoothing in place: y <- eps/k + (1-eps)*y with eps = 0.1,
 * where k is the number of label columns. */
void smooth_data(data d)
{
    const float eps = .1;
    const float uniform = 1. / d.y.cols;   // mass of the uniform mixture component
    for (int r = 0; r < d.y.rows; ++r) {
        for (int col = 0; col < d.y.cols; ++col) {
            d.y.vals[r][col] = eps * uniform + (1 - eps) * d.y.vals[r][col];
        }
    }
}
/* Load all five CIFAR-10 training batches (50000 records of 1 label byte
 * + 3072 pixel bytes) into one dataset, scaled to [0,1] and with
 * label smoothing applied.
 * Fix: the original ignored fread's return value, so a truncated batch
 * file caused reads of uninitialized stack bytes (undefined behavior).
 * A short read now stops reading that file, leaving the rest zeroed. */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;

    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            if (fread(bytes, 1, 3073, fp) != 3073) {
                fprintf(stderr, "Truncated CIFAR-10 batch file: %s (record %d)\n", buff, i);
                break;
            }
            int class_id = bytes[0];
            y.vals[i+b*10000][class_id] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    scale_data_rows(d, 1./255);   // bytes -> [0,1]
    smooth_data(d);
    return d;
}
/* Parse a Go dataset file of alternating records: a "row col" move line
 * followed by a 361-character board line ('1' = one color, '2' = the
 * other, anything else = empty). X gets the board planes (+1/-1/0), y the
 * one-hot move. Matrices grow geometrically and are trimmed at the end.
 * Fixes vs. original: the board line returned by fgetl() was dereferenced
 * without a NULL check (NULL deref on a truncated file), and row/col from
 * sscanf were used unvalidated (out-of-bounds write into the 361-column
 * matrix on malformed input). */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            // Capacity exhausted: double both matrices.
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        char *board = fgetl(fp);
        // Truncated file (no board line) or malformed/out-of-range move line.
        if (!board || sscanf(label, "%d %d", &row, &col) != 2 ||
            row < 0 || row >= 19 || col < 0 || col >= 19) {
            fprintf(stderr, "load_go: skipping malformed record near %d\n", count);
            free(label);
            free(board);        // free(NULL) is a no-op
            if (!board) break;  // end of file mid-record: stop parsing
            continue;
        }
        int index = row*19 + col;
        y.vals[count][index] = 1;

        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    // Trim to the number of records actually read.
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);

    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}
/* In-place Fisher-Yates shuffle of the dataset's rows, keeping each X row
 * paired with its y row.
 * Fix: the original drew `random_gen() % i`, which can never select index
 * i itself — that is Sattolo's algorithm (only cyclic permutations), a
 * biased shuffle. An unbiased Fisher-Yates draws from [0, i] inclusive. */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        int index = random_gen()%(i+1);
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;

        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every feature value in d.X by the scalar s, row by row. */
void scale_data_rows(data d, float s)
{
    int row;
    for(row = 0; row < d.X.rows; ++row) scale_array(d.X.vals[row], d.X.cols, s);
}
/* Add the scalar s to every feature value in d.X, row by row. */
void translate_data_rows(data d, float s)
{
    int row;
    for(row = 0; row < d.X.rows; ++row) translate_array(d.X.vals[row], d.X.cols, s);
}
/* Normalize each feature row of d.X in place via normalize_array. */
void normalize_data_rows(data d)
{
    int row;
    for(row = 0; row < d.X.rows; ++row) normalize_array(d.X.vals[row], d.X.cols);
}
/* Return a shallow view of slice `part` out of `total` roughly equal
   partitions of d.  Integer division makes consecutive parts tile the
   rows exactly.  No row data is copied: the view aliases d's pointers,
   so it must not outlive d and must be freed as shallow. */
data get_data_part(data d, int part, int total)
{
    int x_begin = d.X.rows * part / total;
    int x_end   = d.X.rows * (part + 1) / total;
    int y_begin = d.y.rows * part / total;
    int y_end   = d.y.rows * (part + 1) / total;
    data p = {0};
    p.shallow = 1;
    p.X.rows = x_end - x_begin;
    p.y.rows = y_end - y_begin;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + x_begin;
    p.y.vals = d.y.vals + y_begin;
    return p;
}
/* Build a shallow dataset of num rows sampled uniformly at random, with
   replacement, from d.  Only the row-pointer arrays are new allocations;
   each sampled row aliases d's storage. */
data get_random_data(data d, int num)
{
    data sample = {0};
    sample.shallow = 1;
    sample.X.rows = num;
    sample.y.rows = num;
    sample.X.cols = d.X.cols;
    sample.y.cols = d.y.cols;
    sample.X.vals = (float**)xcalloc(num, sizeof(float*));
    sample.y.vals = (float**)xcalloc(num, sizeof(float*));
    int i;
    for(i = 0; i < num; ++i){
        int row = random_gen()%d.X.rows;
        sample.X.vals[i] = d.X.vals[row];
        sample.y.vals[i] = d.y.vals[row];
    }
    return sample;
}
/* Split d into {train, test}: test takes rows [start, end) where the
   bounds are slice `part` of `total` equal partitions, train takes the
   remaining rows in their original order.  Both halves are shallow
   (aliasing d's rows); the returned 2-element array is heap-allocated. */
data *split_data(data d, int part, int total)
{
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    int ntest = end - start;
    int ntrain = d.X.rows - ntest;
    data train = {0};
    data test = {0};
    train.shallow = test.shallow = 1;
    train.X.rows = train.y.rows = ntrain;
    test.X.rows = test.y.rows = ntest;
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = (float**)xcalloc(ntrain, sizeof(float*));
    test.X.vals = (float**)xcalloc(ntest, sizeof(float*));
    train.y.vals = (float**)xcalloc(ntrain, sizeof(float*));
    test.y.vals = (float**)xcalloc(ntest, sizeof(float*));
    int tr = 0, te = 0;
    int i;
    for(i = 0; i < d.X.rows; ++i){
        if(i >= start && i < end){
            test.X.vals[te] = d.X.vals[i];
            test.y.vals[te] = d.y.vals[i];
            ++te;
        }else{
            train.X.vals[tr] = d.X.vals[i];
            train.y.vals[tr] = d.y.vals[i];
            ++tr;
        }
    }
    data *split = (data*)xcalloc(2, sizeof(data));
    split[0] = train;
    split[1] = test;
    return split;
}
|
hcds.h | //
// Created by Bangtian Liu on 6/30/19.
//
#ifndef PROJECT_HCDS_H
#define PROJECT_HCDS_H
#include "HTree.h"
#include "compression.h"
//#include "../../../../opt/intel/compilers_and_libraries_2019.4.233/mac/mkl/include/mkl.h"
// Flattened ("compressed data structure") storage for the hierarchical
// matrix described by HTree: the dense near-field and far-field blocks are
// packed into single contiguous buffers with per-block offset tables, and
// the per-node projection / skeleton arrays are packed the same way.
// NOTE(review): field semantics inferred from the Cache* builders below;
// confirm against HTree.h.
struct hcds{
    double *CacheNear;      // packed near-field (leaf x leaf) kernel blocks
    uint64_t *nearoffset;   // start offset of each near block in CacheNear
    double *CacheFar;       // packed far-field (skeleton x skeleton) blocks
    uint64_t *faroffset;    // start offset of each far block in CacheFar
    int *projoffset;        // per-node start offset into Proj
    double *Proj;           // packed projection matrices (skels_length x proj_column per node)
    int *w_skeloffset;      // per-node offset into w_skel
    int *u_skeloffset;      // per-node offset into u_skel (same layout as w_skel)
    double *w_skel;         // packed skeleton weights (skels_length x nrhs per node)
    double *u_skel;         // packed skeleton outputs, same layout as w_skel
    int *woffset;           // per-leaf offset into the dense W workspace
    int *uoffset;           // per-leaf offset into the dense U workspace
    int *skel_length;       // skeleton size per node (copied from ret)
    int *proj_column;       // projection column count per node (copied from ret)
    // if non-blocking
    unsigned long int *utmpoffset;  // per near-block scratch offsets (see CacheNear)
    double *utmp;                   // scratch buffer; allocation currently commented out
    unsigned long int *ftmpoffset;  // per far-block scratch offsets (see CacheFar)
    double *ftmp;                   // scratch buffer; allocation currently commented out
};
// Pack all near-field (leaf-leaf) interaction blocks into one contiguous
// buffer cds.CacheNear, following the blocked (row/blockset) schedule.
// Pass 1 (timed into tree.cdstime) accumulates block sizes into
// cds.nearoffset; pass 2 evaluates each kernel submatrix in place.
// NOTE(review): pass 1 fills nearoffset with index++ while pass 2 reads
// nearoffset[k] -- this assumes the nested schedule visits blocks in
// increasing k order; confirm against how nblocks is built.
void CacheNearBlock(HTree &tree, hcds &cds)
{
    cds.nearoffset = (uint64_t *) mkl_malloc(sizeof(uint64_t)*tree.ncount, 64);
    memset(cds.nearoffset, 0, sizeof(uint64_t)*tree.ncount);
    unsigned long int index=0;
    uint64_t offset=0;
    //int count=0;
    auto start=omp_get_wtime();
    for(int i=0; i<tree.nrow; i++)
    {
        for(int j=tree.nblockset[i]; j<tree.nblockset[i+1]; j++)
        {
            // ++count;
            for(int k = tree.nblocks[j]; k<tree.nblocks[j+1]; k++)
            {
                auto nx = tree.nxval[k];
                auto ny = tree.nyval[k];
                auto dimx = tree.Dim[nx];
                auto dimy = tree.Dim[ny];
                cds.nearoffset[index++] = offset;
                offset += dimx*dimy;
            }
        }
    }
    auto stop = omp_get_wtime();
    tree.cdstime += stop - start;
    //printf("nblocks=%d\n", count );
    cds.CacheNear = (double *)mkl_malloc(sizeof(double)*offset, 64);
    memset(cds.CacheNear, 0, sizeof(double)*offset);
    index = 0;
    //#pragma omp parallel for
    for(int i=0; i<tree.nrow; i++)
    {
        for(int j=tree.nblockset[i]; j<tree.nblockset[i+1]; j++)
        {
            for(int k = tree.nblocks[j]; k<tree.nblocks[j+1]; k++)
            {
                auto nx = tree.nxval[k];
                auto ny = tree.nyval[k];
                // map tree nodes to leaf point-id ranges
                auto idx = tree.leaf[nx];
                auto idy = tree.leaf[ny];
                auto lidx = tree.lids + tree.lidsoffset[idx];
                auto lidy = tree.lids + tree.lidsoffset[idy];
                auto lena = tree.lidslen[idx];
                auto lenb = tree.lidslen[idy];
                auto offset = cds.nearoffset[k];
                // evaluate the lena x lenb kernel submatrix into the packed buffer
                Fsubmatrix(lidx, lena, lidy, lenb, cds.CacheNear+offset, tree.ktype, tree.X, tree.dim, tree.h);
            }
        }
    }
}
// Pack all far-field (skeleton-skeleton) interaction blocks into one
// contiguous buffer cds.CacheFar, following the blocked schedule, exactly
// mirroring CacheNearBlock but with per-node skeleton sizes from rtmp.
// Pass 1 (timed) accumulates offsets; pass 2 evaluates the kernel between
// the two nodes' skeleton point sets.
void CacheFarBlock(HTree &tree, hcds &cds, ret *rtmp)
{
    cds.faroffset = (uint64_t *) mkl_malloc(sizeof(uint64_t)*tree.fcount, 64);
    memset(cds.faroffset, 0, sizeof(uint64_t)*tree.fcount);
    unsigned long int index=0;
    uint64_t offset=0;
    // int count=0;
    auto start=omp_get_wtime();
    for(int i=0; i<tree.frow; i++)
    {
        for(int j=tree.fblockset[i]; j<tree.fblockset[i+1]; j++)
        {
            // count++;
            for(int k = tree.fblocks[j]; k<tree.fblocks[j+1]; k++)
            {
                auto fx = tree.fxval[k];
                auto fy = tree.fyval[k];
                auto dimx = rtmp[fx].skels_length;
                auto dimy = rtmp[fy].skels_length;
                cds.faroffset[index++] = offset;
                offset += dimx*dimy;
            }
        }
    }
    auto stop = omp_get_wtime();
    tree.cdstime += stop - start;
    //printf("fblock=%d\n", count);
    cds.CacheFar = (double *)mkl_malloc(sizeof(double)*offset, 64);
    memset(cds.CacheFar, 0, sizeof(double)*offset);
    index = 0;
    //#pragma omp parallel for
    for(int i=0; i<tree.frow; i++)
    {
        for(int j=tree.fblockset[i]; j<tree.fblockset[i+1]; j++)
        {
            for(int k = tree.fblocks[j]; k<tree.fblocks[j+1]; k++)
            {
                auto fx = tree.fxval[k];
                auto fy = tree.fyval[k];
                auto lidx = rtmp[fx].skels ;
                auto lidy = rtmp[fy].skels;
                auto lena = rtmp[fx].skels_length;
                auto lenb = rtmp[fy].skels_length;
                auto offset = cds.faroffset[k];
                // kernel submatrix between the two skeleton point sets
                Fsubmatrix(lidx, lena, lidy, lenb, cds.CacheFar+offset, tree.ktype, tree.X, tree.dim, tree.h);
            }
        }
    }
}
// Non-blocking variant of CacheNearBlock: iterates the flat near-block
// list directly.  Also builds cds.utmpoffset, a scratch-offset table of
// dimx*2048 per block.
// NOTE(review): 2048 looks like a maximum right-hand-side tile width --
// confirm; the matching utmp buffer allocation is commented out below.
void CacheNear(HTree &tree, hcds &cds)
{
    cds.nearoffset = (uint64_t *) mkl_malloc(sizeof(uint64_t)*tree.ncount, 64);
    memset(cds.nearoffset, 0, sizeof(uint64_t)*tree.ncount);
    cds.utmpoffset = (unsigned long int *) mkl_malloc(sizeof(unsigned long int)*tree.ncount, 64);
    memset(cds.utmpoffset, 0, sizeof(unsigned long int)*tree.ncount);
    unsigned long int index=0;
    uint64_t offset=0;
    unsigned long int tmpoffset=0;
    auto start = omp_get_wtime();
    // pass 1 (serial, timed): size every block and record offsets
    for(int k=0; k<tree.ncount; k++)
    {
        auto nx = tree.nxval[k];
        auto ny = tree.nyval[k];
        auto dimx = tree.Dim[nx];
        auto dimy = tree.Dim[ny];
        cds.nearoffset[index] = offset;
        cds.utmpoffset[index++] = tmpoffset;
        offset += dimx*dimy;
        tmpoffset += dimx*2048;
    }
    auto stop = omp_get_wtime();
    tree.cdstime += (stop - start);
    cds.CacheNear = (double *)mkl_malloc(sizeof(double)*offset, 64);
    memset(cds.CacheNear, 0, sizeof(double)*offset);
    // cds.utmp = (double *)mkl_malloc(sizeof(double)*tmpoffset, 64);
    // memset(cds.utmp, 0, sizeof(double)*tmpoffset);
    index = 0;
    // pass 2 (parallel): blocks are disjoint slices of CacheNear, so the
    // loop is race-free
    #pragma omp parallel for
    for(int k=0; k<tree.ncount; k++)
    {
        auto nx = tree.nxval[k];
        auto ny = tree.nyval[k];
        auto idx = tree.leaf[nx];
        auto idy = tree.leaf[ny];
        auto lidx = tree.lids + tree.lidsoffset[idx];
        auto lidy = tree.lids + tree.lidsoffset[idy];
        auto lena = tree.lidslen[idx];
        auto lenb = tree.lidslen[idy];
        auto offset = cds.nearoffset[k];
        Fsubmatrix(lidx, lena, lidy, lenb, cds.CacheNear+offset, tree.ktype, tree.X, tree.dim, tree.h);
    }
}
// Non-blocking variant of CacheFarBlock: iterates the flat far-block list,
// sizing blocks by skeleton lengths from rtmp.  Also builds cds.ftmpoffset
// (dimx*2048 scratch per block; matching ftmp allocation commented out --
// see the note on CacheNear).
void CacheFar(HTree &tree, hcds &cds, ret *rtmp)
{
    cds.faroffset = (uint64_t *) mkl_malloc(sizeof(uint64_t)*tree.fcount, 64);
    memset(cds.faroffset, 0, sizeof(uint64_t)*tree.fcount);
    cds.ftmpoffset = (unsigned long int *)mkl_malloc(sizeof(unsigned long int)*tree.fcount, 64);
    memset(cds.ftmpoffset, 0, sizeof(unsigned long int)*tree.fcount);
    unsigned long int index=0;
    unsigned long int offset=0;
    unsigned long int tmpoffset=0;
    auto start = omp_get_wtime();
    // pass 1 (serial, timed): size every block and record offsets
    for(int k = 0; k<tree.fcount; k++)
    {
        auto fx = tree.fxval[k];
        auto fy = tree.fyval[k];
        auto dimx = rtmp[fx].skels_length;
        auto dimy = rtmp[fy].skels_length;
        cds.faroffset[index] = offset;
        cds.ftmpoffset[index++] = tmpoffset;
        offset += dimx*dimy;
        tmpoffset += dimx*2048;
    }
    auto stop = omp_get_wtime();
    tree.cdstime += stop - start;
    cds.CacheFar = (double *)mkl_malloc(sizeof(double)*offset, 64);
    memset(cds.CacheFar, 0, sizeof(double)*offset);
    // cds.ftmp = (double *) mkl_malloc(sizeof(double)*tmpoffset, 64);
    // memset(cds.ftmp, 0, sizeof(double)*tmpoffset);
    index = 0;
    // pass 2 (parallel): disjoint output slices, race-free
    #pragma omp parallel for
    for(int k = 0; k<tree.fcount; k++)
    {
        auto fx = tree.fxval[k];
        auto fy = tree.fyval[k];
        auto lidx = rtmp[fx].skels ;
        auto lidy = rtmp[fy].skels;
        auto lena = rtmp[fx].skels_length;
        auto lenb = rtmp[fy].skels_length;
        auto offset = cds.faroffset[k];
        Fsubmatrix(lidx, lena, lidy, lenb, cds.CacheFar+offset, tree.ktype, tree.X, tree.dim, tree.h);
    }
}
// Pack each node's projection matrix (skels_length x proj_column, from
// rtmp) into the contiguous cds.Proj buffer, visiting levels bottom-up.
// Pass 1 computes per-node offsets; pass 2 copies the data in parallel.
// NOTE(review): cds.Proj uses plain malloc while the offset table uses
// mkl_malloc -- presumably intentional (freed with free()), but confirm.
void cacheProj(HTree &tree, hcds &cds, ret *rtmp)
{
    int numnodes=tree.numnodes;
    cds.projoffset = (int *)mkl_malloc(sizeof(int)*numnodes,64);
    memset(cds.projoffset,0,sizeof(int)*numnodes);
    int offset=0;
    for(int i=tree.depth-1; i>-1; i--)
    {
        for(int j=tree.levelset[i]; j<tree.levelset[i+1]; j++)
        {
            auto idx = tree.idx[j];
            auto skel = rtmp[idx].skels_length;
            auto proj = rtmp[idx].proj_column;
            cds.projoffset[idx]=offset;
            offset+=skel*proj;
        }
    }
    cds.Proj = (double *)malloc(sizeof(double)*offset);
    // each node copies into its own disjoint slice, so this is race-free
    #pragma omp parallel for
    for(int i=tree.depth-1; i>-1; i--) {
        for (int j = tree.levelset[i]; j < tree.levelset[i + 1]; j++)
        {
            auto idx = tree.idx[j];
            auto proj = cds.Proj + cds.projoffset[idx];
            auto skel = rtmp[idx].skels_length;
            auto sproj = rtmp[idx].proj_column;
            memcpy(proj, rtmp[idx].proj, sizeof(double)*skel*sproj);
        }
    }
}
// Variant of cacheProj for the coarsened traversal: nodes are visited in
// the order given by ctree.opostw (levels -> partitions -> node ids)
// instead of the plain level sets.  Layout of cds.Proj is otherwise the
// same.  NOTE(review): the omp loop variable is int while posw.size() is
// unsigned -- harmless here but generates sign-compare warnings.
void coarcacheProj(HTree &tree, hcds &cds, clustertree &ctree, ret *rtmp)
{
    auto &posw = ctree.opostw;
    int numnodes=tree.numnodes;
    cds.projoffset = (int *)mkl_malloc(sizeof(int)*numnodes,64);
    memset(cds.projoffset,0,sizeof(int)*numnodes);
    int offset = 0;
    for(int i=0;i<posw.size();i++)
    {
        auto &lpow=posw[i];
        for(int j=0;j<lpow.size();j++)
        {
            auto wpart=lpow[j];
            for(auto &v:wpart)
            {
                auto skel = rtmp[v].skels_length;
                auto proj = rtmp[v].proj_column;
                cds.projoffset[v]=offset;
                offset+=skel*proj;
            }
        }
    }
    cds.Proj = (double *)malloc(sizeof(double)*offset);
    // disjoint per-node slices: race-free copy
    #pragma omp parallel for
    for(int i=0;i<posw.size();i++)
    {
        auto &lpow=posw[i];
        for(int j=0;j<lpow.size();j++)
        {
            auto wpart=lpow[j];
            for(auto &v:wpart)
            {
                auto proj = cds.Proj + cds.projoffset[v];
                auto tsize = rtmp[v].skels_length*rtmp[v].proj_column;
                memcpy(proj,rtmp[v].proj, sizeof(double)*tsize);
            }
        }
    }
}
// Allocate and zero the packed skeleton weight (w_skel) and output
// (u_skel) buffers: each node gets a skels_length x nrhs slab, laid out in
// bottom-up level order.  w and u deliberately share identical offsets.
void cacheWUskel(HTree &tree, hcds &cds, ret *rtmp, int nrhs)
{
    int numnodes=tree.numnodes;
    cds.w_skeloffset=(int *)mkl_malloc(sizeof(int)*numnodes,64);
    memset(cds.w_skeloffset,0,sizeof(int)*numnodes);
    cds.u_skeloffset=(int *)mkl_malloc(sizeof(int)*numnodes,64);
    memset(cds.u_skeloffset,0,sizeof(int)*numnodes);
    int offset=0;
    for(int i=tree.depth-1; i>-1; i--) {
        for (int j = tree.levelset[i]; j < tree.levelset[i + 1]; j++) {
            auto idx = tree.idx[j];
            auto skel=rtmp[idx].skels_length;
            cds.w_skeloffset[idx]=offset;
            cds.u_skeloffset[idx]=offset;
            offset+=skel*nrhs;
        }
    }
    cds.w_skel=(double *)malloc(sizeof(double)*offset);
    memset(cds.w_skel,0,sizeof(double)*offset);
    cds.u_skel=(double *)malloc(sizeof(double)*offset);
    memset(cds.u_skel,0,sizeof(double)*offset);
}
// Variant of cacheWUskel for the coarsened traversal: identical buffer
// layout, but nodes are visited in ctree.opostw order (levels ->
// partitions -> node ids) so offsets match the coarsened schedule.
void coarcacheWUskel(HTree &tree, hcds &cds, clustertree &ctree, ret *rtmp, int nrhs)
{
    auto &posw = ctree.opostw;
    int numnodes=tree.numnodes;
    cds.w_skeloffset=(int *)mkl_malloc(sizeof(int)*numnodes,64);
    memset(cds.w_skeloffset,0,sizeof(int)*numnodes);
    cds.u_skeloffset=(int *)mkl_malloc(sizeof(int)*numnodes,64);
    memset(cds.u_skeloffset,0,sizeof(int)*numnodes);
    int offset = 0;
    for(int i=0;i<posw.size();i++)
    {
        auto &lpow=posw[i];
        for(int j=0;j<lpow.size();j++)
        {
            auto wpart=lpow[j];
            for(auto &v:wpart)
            {
                auto skel=rtmp[v].skels_length;
                auto ncol=nrhs;
                cds.w_skeloffset[v]=offset;
                cds.u_skeloffset[v]=offset;
                offset+=skel*ncol;
            }
        }
    }
    cds.w_skel=(double *)malloc(sizeof(double)*offset);
    memset(cds.w_skel,0,sizeof(double)*offset);
    cds.u_skel=(double *)malloc(sizeof(double)*offset);
    memset(cds.u_skel,0,sizeof(double)*offset);
}
// Build per-leaf offsets into the dense W/U workspaces.  Leaf i owns a
// contiguous (points-in-leaf x nrhs) slab; w and u share one layout.
// (rtmp is unused but kept for signature parity with its siblings.)
void cachewuoffset(HTree &tree, hcds &cds, ret *rtmp, int nrhs)
{
    int nl = tree.nleaf;
    cds.woffset = (int *)mkl_malloc(sizeof(int)*nl,64);
    cds.uoffset = (int *)mkl_malloc(sizeof(int)*nl,64);
    int cursor = 0;
    for(int leaf = 0; leaf < nl; ++leaf)
    {
        cds.woffset[leaf] = cursor;
        cds.uoffset[leaf] = cursor;
        cursor += tree.lidslen[tree.leaf[leaf]] * nrhs;
    }
}
// Copy each node's skeleton size and projection width out of the ret
// array into flat lookup tables.  The loop starts at node 1, exactly as
// before, so entry 0 stays zero from the memset.
void cacheSkeldim(HTree &tree, hcds &cds, ret *rtmp)
{
    int numnodes=tree.numnodes;
    cds.skel_length=(int *)mkl_malloc(sizeof(int)*numnodes,64);
    memset(cds.skel_length,0,sizeof(int)*numnodes);
    cds.proj_column=(int *)mkl_malloc(sizeof(int)*numnodes,64);
    memset(cds.proj_column,0,sizeof(int)*numnodes);
    for(int node = 1; node < numnodes; ++node){
        cds.skel_length[node] = rtmp[node].skels_length;
        cds.proj_column[node] = rtmp[node].proj_column;
    }
}
// Build the full flattened data structure from the tree decomposition:
// near/far kernel caches (blocked or flat, per `cbs`), packed projections
// (coarsened or plain traversal, per `coarsing`), the leaf W/U offsets and
// the per-node skeleton dimension tables.  Everything after the kernel
// caches is timed into tree.cdstime.
void DUV2CDS(HTree &tree, hcds &cds, clustertree &ctree, ret *rtmp, int nrhs, bool coarsing, bool cbs)
{
    if(cbs){
        CacheNearBlock(tree,cds);
        CacheFarBlock(tree, cds, rtmp);
    }
    else{
        CacheNear(tree, cds);
        CacheFar(tree, cds, rtmp);
    }
    auto start = omp_get_wtime();
    if(coarsing){
        coarcacheProj(tree, cds, ctree, rtmp);
        // coarcacheWUskel(tree, cds, ctree, rtmp, nrhs);
    }
    else {
        cacheProj(tree, cds, rtmp);
        // cacheWUskel(tree,cds, rtmp, nrhs);
    }
    cachewuoffset(tree, cds, rtmp, nrhs);
    cacheSkeldim(tree, cds, rtmp);
    auto ends = omp_get_wtime();
    tree.cdstime+=(ends - start);
    // printf("%f,", tree.cdstime);
}
// Gather the global right-hand sides W (n rows, nrhs columns, column-major)
// into per-leaf contiguous blocks of TW: each leaf's slab holds its points'
// values for rhs 0, then rhs 1, ..., using the leaf's permuted point ids.
void transform(HTree & tree, hcds cds, double *W, int n, int nrhs, double *TW)
{
    for(int leaf = 0; leaf < tree.nleaf; ++leaf)
    {
        auto node = tree.leaf[leaf];
        auto npts = tree.lidslen[node];
        auto ids  = tree.lids + tree.lidsoffset[node];
        double *dst = TW + cds.woffset[tree.lm[node]];
        for(int rhs = 0; rhs < nrhs; ++rhs)
        {
            for(int k = 0; k < npts; ++k)
            {
                *dst++ = W[rhs*n + ids[k]];
            }
        }
    }
}
// Allocate the packed skeleton weight/output buffers, picking the offset
// ordering that matches whether level-coarsening is enabled.
void allocatewuskel(HTree &tree, hcds &cds, clustertree &ctree, ret *rtmp, int nrhs, bool coarsing)
{
    if(!coarsing){
        cacheWUskel(tree, cds, rtmp, nrhs);
        return;
    }
    coarcacheWUskel(tree, cds, ctree, rtmp, nrhs);
    // cachewuoffset(tree, cds, rtmp, nrhs);
}
#endif //PROJECT_HCDS_H
|
adaptive_maxpool_2d.h | // Copyright 2018 Joan Puigcerver
#ifndef NNUTILS_CPU_ADAPTIVE_MAXPOOL_2D_H_
#define NNUTILS_CPU_ADAPTIVE_MAXPOOL_2D_H_
#include <nnutils/adaptive_pool.h>
#include <nnutils/utils.h>
#include <cassert>
#ifdef __cplusplus
namespace nnutils {
namespace cpu {
using nnutils::internal::pixv;
using nnutils::internal::start_index;
using nnutils::internal::end_index;
// Adaptive 2-D max pooling, forward pass, over a batch of N images with C
// channels in NCHW layout.  Each sample may declare its own effective input
// size (inp_sizes, pairs of H,W per sample) and output size (out_sizes);
// output cells outside a sample's own output size are zero-filled.  When
// out_idx is non-null it records, per output pixel, the flat inp_H*inp_W
// index of the selected maximum, for use by the backward pass.
// The four loops are fully independent, hence collapse(4).
template <typename T, typename Int>
void adaptive_maxpool_2d_fwd(
    const Int N, const Int C,
    const Int inp_H, const Int inp_W, const Int out_H, const Int out_W,
    const Int* inp_sizes, const Int* out_sizes,
    const T* inp, T* out, Int* out_idx) {
  assert(N > 0 && C > 0 && inp_H > 0 && inp_W > 0);
  assert(out_H > 0 && out_W > 0);
  assert(inp != nullptr);
  assert(out != nullptr);
  #pragma omp parallel for collapse(4)
  for (Int n = 0; n < N; ++n) {
    for (Int c = 0; c < C; ++c) {
      for (Int y = 0; y < out_H; ++y) {
        for (Int x = 0; x < out_W; ++x) {
          // Input height and width.
          const Int hi = inp_sizes ? inp_sizes[2 * n    ] : inp_H;
          const Int wi = inp_sizes ? inp_sizes[2 * n + 1] : inp_W;
          // Output height and width.
          const Int ho = out_sizes ? out_sizes[2 * n    ] : out_H;
          const Int wo = out_sizes ? out_sizes[2 * n + 1] : out_W;
          // Pointers to the input/output data for the current sample/channel.
          const T* inp_nc = inp + n * C * inp_H * inp_W + c * inp_H * inp_W;
          T* out_nc = out + n * C * out_H * out_W + c * out_H * out_W;
          Int* out_idx_nc = out_idx + n * C * out_H * out_W + c * out_H * out_W;
          if (y < ho && x < wo) {
            // Adaptive pooling window [i0, i1) x [j0, j1) for this output cell.
            const Int i0 = start_index<Int>(y, ho, hi);
            const Int i1 = end_index<Int>(y, ho, hi);
            const Int j0 = start_index<Int>(x, wo, wi);
            const Int j1 = end_index<Int>(x, wo, wi);
            // Seed with the top-left pixel, then scan for the maximum.
            T val = pixv(inp_nc, inp_W, i0, j0);
            Int idx = i0 * inp_W + j0;
            for (Int i = i0; i < i1; ++i) {
              for (Int j = j0; j < j1; ++j) {
                const T& v = pixv(inp_nc, inp_W, i, j);
                if (v > val) {
                  val = v;
                  idx = i * inp_W + j;
                }
              }
            }
            pixv(out_nc, out_W, y, x) = val;
            if (out_idx) { pixv(out_idx_nc, out_W, y, x) = idx; }
          } else {
            // Outside this sample's own output region: write zeros.
            pixv(out_nc, out_W, y, x) = 0;
            if (out_idx) { pixv(out_idx_nc, out_W, y, x) = 0; }
          }
        }
      }
    }
  }
}
// Adaptive 2-D max pooling, backward pass: each output gradient is routed
// back to the single input pixel recorded in out_idx by the forward pass.
// grad_input is assumed to be pre-zeroed by the caller (only selected
// pixels are touched) -- NOTE(review): confirm against call sites.  The
// atomic guards against two output cells having selected the same input
// pixel across the collapsed parallel loops.
template <typename T, typename Int>
void adaptive_maxpool_2d_bwd(
    const Int N, const Int C, const Int inp_H, const Int inp_W,
    const Int out_H, const Int out_W, const Int* out_sizes,
    const Int* out_idx, const T* grad_output, T* grad_input) {
  assert(N > 0 && C > 0 && inp_H > 0 && inp_W > 0);
  assert(out_H > 0 && out_W > 0);
  assert(grad_output != nullptr);
  assert(out_idx != nullptr);
  assert(grad_input != nullptr);
  #pragma omp parallel for collapse(4)
  for (Int n = 0; n < N; ++n) {
    for (Int c = 0; c < C; ++c) {
      for (Int y = 0; y < out_H; ++y) {
        for (Int x = 0; x < out_W; ++x) {
          // Output height and width for the current image.
          const Int ho = out_sizes ? out_sizes[2 * n    ] : out_H;
          const Int wo = out_sizes ? out_sizes[2 * n + 1] : out_W;
          if (y < ho && x < wo) {
            const Int inp_offset = n * C * inp_H * inp_W + c * inp_H * inp_W;
            const Int out_offset = n * C * out_H * out_W + c * out_H * out_W;
            // Pointer to the output gradients of the current image and channel.
            const T* g_out_nc = grad_output + out_offset;
            // Index of the input pixel that was selected as the maximum.
            const Int idx = pixv(out_idx + out_offset, out_W, y, x);
            // Update input gradients for the selected input pixel.
            #pragma omp atomic
            grad_input[inp_offset + idx] += pixv(g_out_nc, out_W, y, x);
          }
        }
      }
    }
  }
}
} // namespace cpu
} // namespace nnutils
#endif // __cplusplus
#endif // NNUTILS_CPU_ADAPTIVE_MAXPOOL_2D_H_
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
% size_t analyzeImage(Image *images,const int argc,
% char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  analyzeImage() computes the mean, standard deviation, kurtosis and
  skewness of the brightness and saturation of every image in the list and
  stores them as "filter:brightness:*" / "filter:saturation:*" properties.

  Fix over the previous revision: the OpenMP pixel loop accumulated the
  statistical sums (and `area`) from all threads with no reduction, and
  shared one hue/saturation/brightness triple across threads -- data races
  that silently corrupted the statistics.  The sums now use an OpenMP
  reduction(+) and the per-pixel HSB values are loop-local (thread-private).
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
  const char **argv,ExceptionInfo *exception)
{
  char
    text[MaxTextExtent];

  double
    area,
    brightness_mean,
    brightness_standard_deviation,
    brightness_kurtosis,
    brightness_skewness,
    brightness_sum_x,
    brightness_sum_x2,
    brightness_sum_x3,
    brightness_sum_x4,
    saturation_mean,
    saturation_standard_deviation,
    saturation_kurtosis,
    saturation_skewness,
    saturation_sum_x,
    saturation_sum_x2,
    saturation_sum_x3,
    saturation_sum_x4;

  Image
    *image;

  assert(images != (Image **) NULL);
  assert(*images != (Image *) NULL);
  assert((*images)->signature == MagickSignature);
  (void) argc;
  (void) argv;
  image=(*images);
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
  {
    CacheView
      *image_view;

    MagickBooleanType
      status;

    ssize_t
      y;

    /* reset all accumulators for this image */
    brightness_sum_x=0.0;
    brightness_sum_x2=0.0;
    brightness_sum_x3=0.0;
    brightness_sum_x4=0.0;
    brightness_mean=0.0;
    brightness_standard_deviation=0.0;
    brightness_kurtosis=0.0;
    brightness_skewness=0.0;
    saturation_sum_x=0.0;
    saturation_sum_x2=0.0;
    saturation_sum_x3=0.0;
    saturation_sum_x4=0.0;
    saturation_mean=0.0;
    saturation_standard_deviation=0.0;
    saturation_kurtosis=0.0;
    saturation_skewness=0.0;
    area=0.0;
    status=MagickTrue;
    image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status) \
    reduction(+:area,brightness_sum_x,brightness_sum_x2,brightness_sum_x3, \
      brightness_sum_x4,saturation_sum_x,saturation_sum_x2, \
      saturation_sum_x3,saturation_sum_x4)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      double
        brightness,
        hue,
        saturation;

      register const PixelPacket
        *p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p),
          GetPixelBlue(p),&hue,&saturation,&brightness);
        brightness*=QuantumRange;
        brightness_sum_x+=brightness;
        brightness_sum_x2+=brightness*brightness;
        brightness_sum_x3+=brightness*brightness*brightness;
        brightness_sum_x4+=brightness*brightness*brightness*brightness;
        saturation*=QuantumRange;
        saturation_sum_x+=saturation;
        saturation_sum_x2+=saturation*saturation;
        saturation_sum_x3+=saturation*saturation*saturation;
        saturation_sum_x4+=saturation*saturation*saturation*saturation;
        area++;
        p++;
      }
    }
    image_view=DestroyCacheView(image_view);
    if (area <= 0.0)
      break;
    /* moment-based statistics; kurtosis/skewness stay 0 when sigma == 0 */
    brightness_mean=brightness_sum_x/area;
    (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_mean);
    (void) SetImageProperty(image,"filter:brightness:mean",text);
    brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
      area*brightness_sum_x/area));
    (void) FormatMagickString(text,MaxTextExtent,"%g",
      brightness_standard_deviation);
    (void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
    if (brightness_standard_deviation != 0)
      brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
        brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
        brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
        brightness_mean*brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation*
        brightness_standard_deviation)-3.0;
    (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_kurtosis);
    (void) SetImageProperty(image,"filter:brightness:kurtosis",text);
    if (brightness_standard_deviation != 0)
      brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
        brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
        brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation);
    (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_skewness);
    (void) SetImageProperty(image,"filter:brightness:skewness",text);
    saturation_mean=saturation_sum_x/area;
    (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_mean);
    (void) SetImageProperty(image,"filter:saturation:mean",text);
    saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
      area*saturation_sum_x/area));
    (void) FormatMagickString(text,MaxTextExtent,"%g",
      saturation_standard_deviation);
    (void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
    if (saturation_standard_deviation != 0)
      saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
        saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
        saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
        saturation_mean*saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation*
        saturation_standard_deviation)-3.0;
    (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_kurtosis);
    (void) SetImageProperty(image,"filter:saturation:kurtosis",text);
    if (saturation_standard_deviation != 0)
      saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
        saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
        saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation);
    (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_skewness);
    (void) SetImageProperty(image,"filter:saturation:skewness",text);
  }
  return(MagickImageFilterSignature);
}
|
__clang_hip_math.h | /*===---- __clang_hip_math.h - Device-side HIP math support ----------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __CLANG_HIP_MATH_H__
#define __CLANG_HIP_MATH_H__
#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)
#error "This file is for HIP and OpenMP AMDGCN device compilation only."
#endif
#if !defined(__HIPCC_RTC__)
#if defined(__cplusplus)
#include <algorithm>
#endif
#include <limits.h>
#include <stdint.h>
#ifdef __OPENMP_AMDGCN__
#include <omp.h>
#endif
#endif // !defined(__HIPCC_RTC__)
#pragma push_macro("__DEVICE__")
#ifdef __OPENMP_AMDGCN__
#define __DEVICE__ static inline __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __device__ inline __attribute__((always_inline))
#endif
// A few functions return bool type starting only in C++11.
#pragma push_macro("__RETURN_TYPE")
#ifdef __OPENMP_AMDGCN__
#define __RETURN_TYPE int
#else
#if defined(__cplusplus)
#define __RETURN_TYPE bool
#else
#define __RETURN_TYPE int
#endif
#endif // __OPENMP_AMDGCN__
// Pre-C++11 emulation of a size-equality static_assert: only the <true>
// specialization of __compare_result declares `valid`, so referencing
// __compare_result<false>::valid is a compile error.  C++11 and later use
// the real static_assert.
#if defined (__cplusplus) && __cplusplus < 201103L
// emulate static_assert on type sizes
template<bool>
struct __compare_result{};
template<>
struct __compare_result<true> {
  static const __device__ bool valid;
};

__DEVICE__
void __suppress_unused_warning(bool b){};
template <unsigned int S, unsigned int T>
__DEVICE__ void __static_assert_equal_size() {
  __suppress_unused_warning(__compare_result<S == T>::valid);
}
#define __static_assert_type_size_equal(A, B) \
  __static_assert_equal_size<A,B>()
#else
#define __static_assert_type_size_equal(A,B) \
  static_assert((A) == (B), "")
#endif
__DEVICE__
/* Parse an octal digit string into a NaN mantissa payload; returns 0 on
   any non-octal character.  Fix (matches upstream LLVM): the loop used to
   test the pointer itself (`while (__tagp)`), which is always true, so the
   terminating NUL fell into the invalid-digit branch and every
   NUL-terminated tag returned 0.  Now the loop stops at the NUL and
   returns the accumulated value. */
uint64_t __make_mantissa_base8(const char *__tagp) {
  uint64_t __r = 0;
  while (*__tagp != '\0') {
    char __tmp = *__tagp;
    if (__tmp >= '0' && __tmp <= '7')
      __r = (__r * 8u) + __tmp - '0';
    else
      return 0;
    ++__tagp;
  }
  return __r;
}
__DEVICE__
/* Parse a decimal digit string into a NaN mantissa payload; returns 0 on
   any non-decimal character.  Fix (matches upstream LLVM): `while (__tagp)`
   tested the pointer (always true), so the terminating NUL hit the
   invalid-digit branch and the function always returned 0. */
uint64_t __make_mantissa_base10(const char *__tagp) {
  uint64_t __r = 0;
  while (*__tagp != '\0') {
    char __tmp = *__tagp;
    if (__tmp >= '0' && __tmp <= '9')
      __r = (__r * 10u) + __tmp - '0';
    else
      return 0;
    ++__tagp;
  }
  return __r;
}
__DEVICE__
/* Parse a hexadecimal digit string (both letter cases) into a NaN mantissa
   payload; returns 0 on any non-hex character.  Fix (matches upstream
   LLVM): `while (__tagp)` tested the pointer (always true), so the
   terminating NUL hit the invalid-digit branch and the function always
   returned 0. */
uint64_t __make_mantissa_base16(const char *__tagp) {
  uint64_t __r = 0;
  while (*__tagp != '\0') {
    char __tmp = *__tagp;
    if (__tmp >= '0' && __tmp <= '9')
      __r = (__r * 16u) + __tmp - '0';
    else if (__tmp >= 'a' && __tmp <= 'f')
      __r = (__r * 16u) + __tmp - 'a' + 10;
    else if (__tmp >= 'A' && __tmp <= 'F')
      __r = (__r * 16u) + __tmp - 'A' + 10;
    else
      return 0;
    ++__tagp;
  }
  return __r;
}
__DEVICE__
// Dispatch a NaN payload tag to the right base parser: "0x"/"0X" prefix
// means hexadecimal, a bare leading '0' means octal, anything else decimal.
// A null tag yields 0.
uint64_t __make_mantissa(const char *__tagp) {
  if (!__tagp)
    return 0u;
  if (*__tagp != '0')
    return __make_mantissa_base10(__tagp);
  ++__tagp;
  return (*__tagp == 'x' || *__tagp == 'X')
             ? __make_mantissa_base16(__tagp)
             : __make_mantissa_base8(__tagp);
}
// BEGIN FLOAT
#if defined(__cplusplus)
// Branchless absolute values: __sgn is 0 for non-negative __x and -1 for
// negative __x (arithmetic right shift of the sign bit), so
// (__x ^ __sgn) - __sgn negates exactly the negative inputs.
// NOTE(review): like the C library versions, the result for the most
// negative value (e.g. INT_MIN) is not representable.
__DEVICE__
int abs(int __x) {
  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}
__DEVICE__
long labs(long __x) {
  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}
__DEVICE__
long long llabs(long long __x) {
  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}
#endif
// Thin device-side wrappers mapping the standard single-precision math
// names onto the corresponding OCML builtins (__ocml_*_f32).
__DEVICE__
float acosf(float __x) { return __ocml_acos_f32(__x); }
__DEVICE__
float acoshf(float __x) { return __ocml_acosh_f32(__x); }
__DEVICE__
float asinf(float __x) { return __ocml_asin_f32(__x); }
__DEVICE__
float asinhf(float __x) { return __ocml_asinh_f32(__x); }
__DEVICE__
float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
__DEVICE__
float atanf(float __x) { return __ocml_atan_f32(__x); }
__DEVICE__
float atanhf(float __x) { return __ocml_atanh_f32(__x); }
__DEVICE__
float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
__DEVICE__
float ceilf(float __x) { return __ocml_ceil_f32(__x); }
__DEVICE__
float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); }
__DEVICE__
float cosf(float __x) { return __ocml_cos_f32(__x); }
__DEVICE__
float coshf(float __x) { return __ocml_cosh_f32(__x); }
__DEVICE__
float cospif(float __x) { return __ocml_cospi_f32(__x); }
__DEVICE__
float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
__DEVICE__
float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
__DEVICE__
float erfcf(float __x) { return __ocml_erfc_f32(__x); }
__DEVICE__
float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
__DEVICE__
float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
__DEVICE__
float erff(float __x) { return __ocml_erf_f32(__x); }
__DEVICE__
float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
__DEVICE__
float exp10f(float __x) { return __ocml_exp10_f32(__x); }
__DEVICE__
float exp2f(float __x) { return __ocml_exp2_f32(__x); }
__DEVICE__
float expf(float __x) { return __ocml_exp_f32(__x); }
__DEVICE__
float expm1f(float __x) { return __ocml_expm1_f32(__x); }
__DEVICE__
float fabsf(float __x) { return __ocml_fabs_f32(__x); }
__DEVICE__
float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
// fdividef is a plain division (no OCML builtin needed).
__DEVICE__
float fdividef(float __x, float __y) { return __x / __y; }
__DEVICE__
float floorf(float __x) { return __ocml_floor_f32(__x); }
__DEVICE__
float fmaf(float __x, float __y, float __z) {
  return __ocml_fma_f32(__x, __y, __z);
}
__DEVICE__
float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
__DEVICE__
float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
__DEVICE__
float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
__DEVICE__
// Split __x into a normalized fraction and a power-of-two exponent.  The
// OCML builtin writes the exponent through a private (address_space(5))
// pointer, so a local temporary is used and then copied to the caller's
// pointer.  Under OpenMP the temporary is pinned to thread-local storage.
float frexpf(float __x, int *__nptr) {
  int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  float __r =
      __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
  *__nptr = __tmp;
  return __r;
}
// More OCML wrappers; __finitef/__isinff/__isnanf return __RETURN_TYPE
// (bool in C++, int otherwise -- see the macro above).
__DEVICE__
float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
__DEVICE__
int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
__DEVICE__
__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); }
__DEVICE__
__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); }
__DEVICE__
__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); }
__DEVICE__
float j0f(float __x) { return __ocml_j0_f32(__x); }
__DEVICE__
float j1f(float __x) { return __ocml_j1_f32(__x); }
__DEVICE__
float jnf(int __n, float __x) {
  // Bessel function of the first kind of order __n, built from j0/j1 with
  // the forward recurrence J_{k+1}(x) = (2k/x) J_k(x) - J_{k-1}(x).
  // O(n) steps; the original TODO notes an O(log n) alternative (Ahmes
  // multiplication / Miller & Brown) of unclear benefit here.
  switch (__n) {
  case 0:
    return j0f(__x);
  case 1:
    return j1f(__x);
  default: {
    float __prev = j0f(__x);
    float __curr = j1f(__x);
    for (int __k = 1; __k < __n; ++__k) {
      float __next = (2 * __k) / __x * __curr - __prev;
      __prev = __curr;
      __curr = __next;
    }
    return __curr;
  }
  }
}
// More OCML wrappers.  llrintf/llroundf/lrintf/lroundf rely on the
// implicit float -> integer conversion of the builtin's result.
__DEVICE__
float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
__DEVICE__
float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
__DEVICE__
long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
__DEVICE__
long long int llroundf(float __x) { return __ocml_round_f32(__x); }
__DEVICE__
float log10f(float __x) { return __ocml_log10_f32(__x); }
__DEVICE__
float log1pf(float __x) { return __ocml_log1p_f32(__x); }
__DEVICE__
float log2f(float __x) { return __ocml_log2_f32(__x); }
__DEVICE__
float logbf(float __x) { return __ocml_logb_f32(__x); }
__DEVICE__
float logf(float __x) { return __ocml_log_f32(__x); }
__DEVICE__
long int lrintf(float __x) { return __ocml_rint_f32(__x); }
__DEVICE__
long int lroundf(float __x) { return __ocml_round_f32(__x); }
float modff(float __x, float *__iptr) {
float __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
float __r =
__ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
*__iptr = __tmp;
return __r;
}
__DEVICE__
// Build a quiet NaN (binary32) whose mantissa payload is parsed from
// __tagp by __make_mantissa (defined earlier in this header).
float nanf(const char *__tagp) {
  // Bit-level view of an IEEE-754 float: 22 payload bits plus the quiet
  // bit form the 23-bit mantissa field.
  // NOTE(review): bit-field ordering is implementation-defined; this
  // assumes the usual LSB-first layout used by the supported compilers.
  union {
    float val;
    struct ieee_float {
      unsigned int mantissa : 22;
      unsigned int quiet : 1;
      unsigned int exponent : 8;
      unsigned int sign : 1;
    } bits;
  } __tmp;
  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
  __tmp.bits.sign = 0u;
  __tmp.bits.exponent = ~0u; // all-ones exponent selects the NaN/Inf range
  __tmp.bits.quiet = 1u;     // quiet (non-signaling) NaN
  __tmp.bits.mantissa = __make_mantissa(__tagp);
  return __tmp.val;
}
__DEVICE__
float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
__DEVICE__
float nextafterf(float __x, float __y) {
return __ocml_nextafter_f32(__x, __y);
}
__DEVICE__
float norm3df(float __x, float __y, float __z) {
return __ocml_len3_f32(__x, __y, __z);
}
__DEVICE__
float norm4df(float __x, float __y, float __z, float __w) {
return __ocml_len4_f32(__x, __y, __z, __w);
}
__DEVICE__
float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
__DEVICE__
float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
__DEVICE__
// Euclidean length of the __dim-element vector __a (sqrt of sum of squares).
float normf(int __dim,
            const float *__a) { // TODO: placeholder until OCML adds support.
  float __r = 0;
  // Plain accumulation without scaling: can overflow/underflow for extreme
  // inputs (acceptable for a placeholder, per the TODO above).
  while (__dim--) {
    __r += __a[0] * __a[0];
    ++__a;
  }
  return __ocml_sqrt_f32(__r);
}
__DEVICE__
float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
__DEVICE__
float powif(float __x, int __y) { return __ocml_pown_f32(__x, __y); }
__DEVICE__
float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
__DEVICE__
float remainderf(float __x, float __y) {
return __ocml_remainder_f32(__x, __y);
}
__DEVICE__
float remquof(float __x, float __y, int *__quo) {
int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
float __r = __ocml_remquo_f32(
__x, __y, (__attribute__((address_space(5))) int *)&__tmp);
*__quo = __tmp;
return __r;
}
__DEVICE__
float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }
__DEVICE__
float rintf(float __x) { return __ocml_rint_f32(__x); }
__DEVICE__
float rnorm3df(float __x, float __y, float __z) {
return __ocml_rlen3_f32(__x, __y, __z);
}
__DEVICE__
float rnorm4df(float __x, float __y, float __z, float __w) {
return __ocml_rlen4_f32(__x, __y, __z, __w);
}
__DEVICE__
// Reciprocal Euclidean length of the __dim-element vector __a
// (1 / sqrt of sum of squares).
float rnormf(int __dim,
             const float *__a) { // TODO: placeholder until OCML adds support.
  float __r = 0;
  // Plain accumulation without scaling; see normf for the same caveat.
  while (__dim--) {
    __r += __a[0] * __a[0];
    ++__a;
  }
  return __ocml_rsqrt_f32(__r);
}
__DEVICE__
float roundf(float __x) { return __ocml_round_f32(__x); }
__DEVICE__
float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
__DEVICE__
float scalblnf(float __x, long int __n) {
return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
: __ocml_scalb_f32(__x, __n);
}
__DEVICE__
float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
__DEVICE__
__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); }
__DEVICE__
void sincosf(float __x, float *__sinptr, float *__cosptr) {
float __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
*__sinptr =
__ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
*__cosptr = __tmp;
}
__DEVICE__
void sincospif(float __x, float *__sinptr, float *__cosptr) {
float __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
*__sinptr = __ocml_sincospi_f32(
__x, (__attribute__((address_space(5))) float *)&__tmp);
*__cosptr = __tmp;
}
__DEVICE__
float sinf(float __x) { return __ocml_sin_f32(__x); }
__DEVICE__
float sinhf(float __x) { return __ocml_sinh_f32(__x); }
__DEVICE__
float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
__DEVICE__
float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
__DEVICE__
float tanf(float __x) { return __ocml_tan_f32(__x); }
__DEVICE__
float tanhf(float __x) { return __ocml_tanh_f32(__x); }
__DEVICE__
float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
__DEVICE__
float truncf(float __x) { return __ocml_trunc_f32(__x); }
__DEVICE__
float y0f(float __x) { return __ocml_y0_f32(__x); }
__DEVICE__
float y1f(float __x) { return __ocml_y1_f32(__x); }
__DEVICE__
// Bessel function of the second kind of integer order __n, single
// precision. Seeds with y0f/y1f and applies the same forward recurrence
// as jnf: Y_{i+1}(x) = (2i/x) Y_i(x) - Y_{i-1}(x).
float ynf(int __n, float __x) { // TODO: we could use Ahmes multiplication
  // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case. Placeholder until OCML adds
  // support.
  if (__n == 0)
    return y0f(__x);
  if (__n == 1)
    return y1f(__x);
  float __x0 = y0f(__x);
  float __x1 = y1f(__x);
  for (int __i = 1; __i < __n; ++__i) {
    // Y_{i+1}(x) = (2*i / x) * Y_i(x) - Y_{i-1}(x)
    float __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }
  return __x1;
}
// BEGIN INTRINSICS
__DEVICE__
float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
__DEVICE__
float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
__DEVICE__
float __expf(float __x) { return __ocml_native_exp_f32(__x); }
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); }
__DEVICE__
float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); }
__DEVICE__
float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); }
__DEVICE__
float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fadd_rn(float __x, float __y) { return __x + __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); }
__DEVICE__
float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); }
__DEVICE__
float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); }
__DEVICE__
float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fdiv_rn(float __x, float __y) { return __x / __y; }
#endif
__DEVICE__
float __fdividef(float __x, float __y) { return __x / __y; }
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fmaf_rd(float __x, float __y, float __z) {
return __ocml_fma_rtn_f32(__x, __y, __z);
}
__DEVICE__
float __fmaf_rn(float __x, float __y, float __z) {
return __ocml_fma_rte_f32(__x, __y, __z);
}
__DEVICE__
float __fmaf_ru(float __x, float __y, float __z) {
return __ocml_fma_rtp_f32(__x, __y, __z);
}
__DEVICE__
float __fmaf_rz(float __x, float __y, float __z) {
return __ocml_fma_rtz_f32(__x, __y, __z);
}
#else
__DEVICE__
float __fmaf_rn(float __x, float __y, float __z) {
return __ocml_fma_f32(__x, __y, __z);
}
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); }
__DEVICE__
float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); }
__DEVICE__
float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); }
__DEVICE__
float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fmul_rn(float __x, float __y) { return __x * __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __frcp_rd(float __x) { return __ocml_div_rtn_f32(1.0f, __x); }
__DEVICE__
float __frcp_rn(float __x) { return __ocml_div_rte_f32(1.0f, __x); }
__DEVICE__
float __frcp_ru(float __x) { return __ocml_div_rtp_f32(1.0f, __x); }
__DEVICE__
float __frcp_rz(float __x) { return __ocml_div_rtz_f32(1.0f, __x); }
#else
__DEVICE__
float __frcp_rn(float __x) { return 1.0f / __x; }
#endif
__DEVICE__
float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
__DEVICE__
float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); }
__DEVICE__
float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
__DEVICE__
float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
#else
__DEVICE__
float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); }
__DEVICE__
float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); }
__DEVICE__
float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); }
__DEVICE__
float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fsub_rn(float __x, float __y) { return __x - __y; }
#endif
__DEVICE__
float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
__DEVICE__
float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
__DEVICE__
float __logf(float __x) { return __ocml_native_log_f32(__x); }
__DEVICE__
float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
__DEVICE__
float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x); }
__DEVICE__
void __sincosf(float __x, float *__sinptr, float *__cosptr) {
*__sinptr = __ocml_native_sin_f32(__x);
*__cosptr = __ocml_native_cos_f32(__x);
}
__DEVICE__
float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
__DEVICE__
float __tanf(float __x) { return __ocml_tan_f32(__x); }
// END INTRINSICS
// END FLOAT
// BEGIN DOUBLE
__DEVICE__
double acos(double __x) { return __ocml_acos_f64(__x); }
__DEVICE__
double acosh(double __x) { return __ocml_acosh_f64(__x); }
__DEVICE__
double asin(double __x) { return __ocml_asin_f64(__x); }
__DEVICE__
double asinh(double __x) { return __ocml_asinh_f64(__x); }
__DEVICE__
double atan(double __x) { return __ocml_atan_f64(__x); }
__DEVICE__
double atan2(double __x, double __y) { return __ocml_atan2_f64(__x, __y); }
__DEVICE__
double atanh(double __x) { return __ocml_atanh_f64(__x); }
__DEVICE__
double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
__DEVICE__
double ceil(double __x) { return __ocml_ceil_f64(__x); }
__DEVICE__
double copysign(double __x, double __y) {
return __ocml_copysign_f64(__x, __y);
}
__DEVICE__
double cos(double __x) { return __ocml_cos_f64(__x); }
__DEVICE__
double cosh(double __x) { return __ocml_cosh_f64(__x); }
__DEVICE__
double cospi(double __x) { return __ocml_cospi_f64(__x); }
__DEVICE__
double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
__DEVICE__
double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
__DEVICE__
double erf(double __x) { return __ocml_erf_f64(__x); }
__DEVICE__
double erfc(double __x) { return __ocml_erfc_f64(__x); }
__DEVICE__
double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
__DEVICE__
double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
__DEVICE__
double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
__DEVICE__
double exp(double __x) { return __ocml_exp_f64(__x); }
__DEVICE__
double exp10(double __x) { return __ocml_exp10_f64(__x); }
__DEVICE__
double exp2(double __x) { return __ocml_exp2_f64(__x); }
__DEVICE__
double expm1(double __x) { return __ocml_expm1_f64(__x); }
__DEVICE__
double fabs(double __x) { return __ocml_fabs_f64(__x); }
__DEVICE__
double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
__DEVICE__
double floor(double __x) { return __ocml_floor_f64(__x); }
__DEVICE__
double fma(double __x, double __y, double __z) {
return __ocml_fma_f64(__x, __y, __z);
}
__DEVICE__
double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
__DEVICE__
double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
__DEVICE__
double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
__DEVICE__
double frexp(double __x, int *__nptr) {
int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
double __r =
__ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
*__nptr = __tmp;
return __r;
}
__DEVICE__
double hypot(double __x, double __y) { return __ocml_hypot_f64(__x, __y); }
__DEVICE__
int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
__DEVICE__
__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); }
__DEVICE__
__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); }
__DEVICE__
__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); }
__DEVICE__
double j0(double __x) { return __ocml_j0_f64(__x); }
__DEVICE__
double j1(double __x) { return __ocml_j1_f64(__x); }
__DEVICE__
// Bessel function of the first kind of integer order __n, double
// precision counterpart of jnf (same forward recurrence, same caveats).
double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication
  // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case. Placeholder until OCML adds
  // support.
  if (__n == 0)
    return j0(__x);
  if (__n == 1)
    return j1(__x);
  double __x0 = j0(__x);
  double __x1 = j1(__x);
  for (int __i = 1; __i < __n; ++__i) {
    // J_{i+1}(x) = (2*i / x) * J_i(x) - J_{i-1}(x)
    double __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }
  return __x1;
}
__DEVICE__
double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
__DEVICE__
double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
__DEVICE__
long long int llrint(double __x) { return __ocml_rint_f64(__x); }
__DEVICE__
long long int llround(double __x) { return __ocml_round_f64(__x); }
__DEVICE__
double log(double __x) { return __ocml_log_f64(__x); }
__DEVICE__
double log10(double __x) { return __ocml_log10_f64(__x); }
__DEVICE__
double log1p(double __x) { return __ocml_log1p_f64(__x); }
__DEVICE__
double log2(double __x) { return __ocml_log2_f64(__x); }
__DEVICE__
double logb(double __x) { return __ocml_logb_f64(__x); }
__DEVICE__
long int lrint(double __x) { return __ocml_rint_f64(__x); }
__DEVICE__
long int lround(double __x) { return __ocml_round_f64(__x); }
__DEVICE__
double modf(double __x, double *__iptr) {
double __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
double __r =
__ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
*__iptr = __tmp;
return __r;
}
__DEVICE__
// Build a quiet NaN (binary64) whose mantissa payload is parsed from
// __tagp by __make_mantissa (defined earlier in this header).
double nan(const char *__tagp) {
#if !_WIN32
  // Bit-level view of an IEEE-754 double: 51 payload bits plus the quiet
  // bit form the 52-bit mantissa field.
  // NOTE(review): bit-field ordering is implementation-defined; this
  // assumes the usual LSB-first layout used by the supported compilers.
  union {
    double val;
    struct ieee_double {
      uint64_t mantissa : 51;
      uint32_t quiet : 1;
      uint32_t exponent : 11;
      uint32_t sign : 1;
    } bits;
  } __tmp;
  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
  __tmp.bits.sign = 0u;
  __tmp.bits.exponent = ~0u; // all-ones exponent selects the NaN/Inf range
  __tmp.bits.quiet = 1u;     // quiet (non-signaling) NaN
  __tmp.bits.mantissa = __make_mantissa(__tagp);
  return __tmp.val;
#else
  __static_assert_type_size_equal(sizeof(uint64_t), sizeof(double));
  uint64_t __val = __make_mantissa(__tagp);
  // Set the exponent field (bits 52..62) and the quiet bit (bit 51).
  // The constant must be 64-bit: `0xFFF << 51` shifts a 32-bit int by 51,
  // which is undefined behavior (C/C++ shift width rule).
  __val |= 0xFFFull << 51;
  return *reinterpret_cast<double *>(&__val);
#endif
}
__DEVICE__
double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
__DEVICE__
double nextafter(double __x, double __y) {
return __ocml_nextafter_f64(__x, __y);
}
__DEVICE__
// Euclidean length of the __dim-element vector __a (sqrt of sum of
// squares); double-precision counterpart of normf.
double norm(int __dim,
            const double *__a) { // TODO: placeholder until OCML adds support.
  double __r = 0;
  // Plain accumulation without scaling; see normf for the same caveat.
  while (__dim--) {
    __r += __a[0] * __a[0];
    ++__a;
  }
  return __ocml_sqrt_f64(__r);
}
__DEVICE__
double norm3d(double __x, double __y, double __z) {
return __ocml_len3_f64(__x, __y, __z);
}
__DEVICE__
double norm4d(double __x, double __y, double __z, double __w) {
return __ocml_len4_f64(__x, __y, __z, __w);
}
__DEVICE__
double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
__DEVICE__
double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
__DEVICE__
double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
__DEVICE__
double powi(double __x, int __y) { return __ocml_pown_f64(__x, __y); }
__DEVICE__
double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
__DEVICE__
double remainder(double __x, double __y) {
return __ocml_remainder_f64(__x, __y);
}
__DEVICE__
double remquo(double __x, double __y, int *__quo) {
int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
double __r = __ocml_remquo_f64(
__x, __y, (__attribute__((address_space(5))) int *)&__tmp);
*__quo = __tmp;
return __r;
}
__DEVICE__
double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }
__DEVICE__
double rint(double __x) { return __ocml_rint_f64(__x); }
__DEVICE__
// Reciprocal Euclidean length of the __dim-element vector __a
// (1 / sqrt of sum of squares); double-precision counterpart of rnormf.
double rnorm(int __dim,
             const double *__a) { // TODO: placeholder until OCML adds support.
  double __r = 0;
  // Plain accumulation without scaling; see normf for the same caveat.
  while (__dim--) {
    __r += __a[0] * __a[0];
    ++__a;
  }
  return __ocml_rsqrt_f64(__r);
}
__DEVICE__
double rnorm3d(double __x, double __y, double __z) {
return __ocml_rlen3_f64(__x, __y, __z);
}
__DEVICE__
double rnorm4d(double __x, double __y, double __z, double __w) {
return __ocml_rlen4_f64(__x, __y, __z, __w);
}
__DEVICE__
double round(double __x) { return __ocml_round_f64(__x); }
__DEVICE__
double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
__DEVICE__
double scalbln(double __x, long int __n) {
return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
: __ocml_scalb_f64(__x, __n);
}
__DEVICE__
double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); }
__DEVICE__
__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); }
__DEVICE__
double sin(double __x) { return __ocml_sin_f64(__x); }
__DEVICE__
void sincos(double __x, double *__sinptr, double *__cosptr) {
double __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
*__sinptr = __ocml_sincos_f64(
__x, (__attribute__((address_space(5))) double *)&__tmp);
*__cosptr = __tmp;
}
__DEVICE__
void sincospi(double __x, double *__sinptr, double *__cosptr) {
double __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
*__sinptr = __ocml_sincospi_f64(
__x, (__attribute__((address_space(5))) double *)&__tmp);
*__cosptr = __tmp;
}
__DEVICE__
double sinh(double __x) { return __ocml_sinh_f64(__x); }
__DEVICE__
double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
__DEVICE__
double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
__DEVICE__
double tan(double __x) { return __ocml_tan_f64(__x); }
__DEVICE__
double tanh(double __x) { return __ocml_tanh_f64(__x); }
__DEVICE__
double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
__DEVICE__
double trunc(double __x) { return __ocml_trunc_f64(__x); }
__DEVICE__
double y0(double __x) { return __ocml_y0_f64(__x); }
__DEVICE__
double y1(double __x) { return __ocml_y1_f64(__x); }
__DEVICE__
// Bessel function of the second kind of integer order __n, double
// precision counterpart of ynf (same forward recurrence, same caveats).
double yn(int __n, double __x) { // TODO: we could use Ahmes multiplication
  // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case. Placeholder until OCML adds
  // support.
  if (__n == 0)
    return y0(__x);
  if (__n == 1)
    return y1(__x);
  double __x0 = y0(__x);
  double __x1 = y1(__x);
  for (int __i = 1; __i < __n; ++__i) {
    // Y_{i+1}(x) = (2*i / x) * Y_i(x) - Y_{i-1}(x)
    double __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }
  return __x1;
}
// BEGIN INTRINSICS
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dadd_rd(double __x, double __y) {
return __ocml_add_rtn_f64(__x, __y);
}
__DEVICE__
double __dadd_rn(double __x, double __y) {
return __ocml_add_rte_f64(__x, __y);
}
__DEVICE__
double __dadd_ru(double __x, double __y) {
return __ocml_add_rtp_f64(__x, __y);
}
__DEVICE__
double __dadd_rz(double __x, double __y) {
return __ocml_add_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __dadd_rn(double __x, double __y) { return __x + __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __ddiv_rd(double __x, double __y) {
return __ocml_div_rtn_f64(__x, __y);
}
__DEVICE__
double __ddiv_rn(double __x, double __y) {
return __ocml_div_rte_f64(__x, __y);
}
__DEVICE__
double __ddiv_ru(double __x, double __y) {
return __ocml_div_rtp_f64(__x, __y);
}
__DEVICE__
double __ddiv_rz(double __x, double __y) {
return __ocml_div_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __ddiv_rn(double __x, double __y) { return __x / __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dmul_rd(double __x, double __y) {
return __ocml_mul_rtn_f64(__x, __y);
}
__DEVICE__
double __dmul_rn(double __x, double __y) {
return __ocml_mul_rte_f64(__x, __y);
}
__DEVICE__
double __dmul_ru(double __x, double __y) {
return __ocml_mul_rtp_f64(__x, __y);
}
__DEVICE__
double __dmul_rz(double __x, double __y) {
return __ocml_mul_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __dmul_rn(double __x, double __y) { return __x * __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __drcp_rd(double __x) { return __ocml_div_rtn_f64(1.0, __x); }
__DEVICE__
double __drcp_rn(double __x) { return __ocml_div_rte_f64(1.0, __x); }
__DEVICE__
double __drcp_ru(double __x) { return __ocml_div_rtp_f64(1.0, __x); }
__DEVICE__
double __drcp_rz(double __x) { return __ocml_div_rtz_f64(1.0, __x); }
#else
__DEVICE__
double __drcp_rn(double __x) { return 1.0 / __x; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
__DEVICE__
double __dsqrt_rn(double __x) { return __ocml_sqrt_rte_f64(__x); }
__DEVICE__
double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
__DEVICE__
double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
#else
__DEVICE__
double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dsub_rd(double __x, double __y) {
return __ocml_sub_rtn_f64(__x, __y);
}
__DEVICE__
double __dsub_rn(double __x, double __y) {
return __ocml_sub_rte_f64(__x, __y);
}
__DEVICE__
double __dsub_ru(double __x, double __y) {
return __ocml_sub_rtp_f64(__x, __y);
}
__DEVICE__
double __dsub_rz(double __x, double __y) {
return __ocml_sub_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __dsub_rn(double __x, double __y) { return __x - __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __fma_rd(double __x, double __y, double __z) {
return __ocml_fma_rtn_f64(__x, __y, __z);
}
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
return __ocml_fma_rte_f64(__x, __y, __z);
}
__DEVICE__
double __fma_ru(double __x, double __y, double __z) {
return __ocml_fma_rtp_f64(__x, __y, __z);
}
__DEVICE__
double __fma_rz(double __x, double __y, double __z) {
return __ocml_fma_rtz_f64(__x, __y, __z);
}
#else
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
return __ocml_fma_f64(__x, __y, __z);
}
#endif
// END INTRINSICS
// END DOUBLE
// C only macros
#if !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
#define isfinite(__x) _Generic((__x), float : __finitef, double : __finite)(__x)
#define isinf(__x) _Generic((__x), float : __isinff, double : __isinf)(__x)
#define isnan(__x) _Generic((__x), float : __isnanf, double : __isnan)(__x)
#define signbit(__x) \
_Generic((__x), float : __signbitf, double : __signbit)(__x)
#endif // !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
#if defined(__cplusplus)
template <class T> __DEVICE__ T min(T __arg1, T __arg2) {
return (__arg1 < __arg2) ? __arg1 : __arg2;
}
template <class T> __DEVICE__ T max(T __arg1, T __arg2) {
return (__arg1 > __arg2) ? __arg1 : __arg2;
}
__DEVICE__ int min(int __arg1, int __arg2) {
return (__arg1 < __arg2) ? __arg1 : __arg2;
}
__DEVICE__ int max(int __arg1, int __arg2) {
return (__arg1 > __arg2) ? __arg1 : __arg2;
}
__DEVICE__
float max(float __x, float __y) { return fmaxf(__x, __y); }
__DEVICE__
double max(double __x, double __y) { return fmax(__x, __y); }
__DEVICE__
float min(float __x, float __y) { return fminf(__x, __y); }
__DEVICE__
double min(double __x, double __y) { return fmin(__x, __y); }
#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
__host__ inline static int min(int __arg1, int __arg2) {
return std::min(__arg1, __arg2);
}
__host__ inline static int max(int __arg1, int __arg2) {
return std::max(__arg1, __arg2);
}
#endif // !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
#endif
#pragma pop_macro("__DEVICE__")
#pragma pop_macro("__RETURN_TYPE")
#endif // __CLANG_HIP_MATH_H__
|
parallel_word_counter.c | #include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <omp.h>
int n_threads; /* worker thread count, parsed from argv[1] in main() */
char *text;    /* whole input text, heap-allocated by read_text_file() */
/* Keywords to count; every entry must fit in 10 bytes including the NUL. */
char keywords[][10] = {
    "a",     "as",    "at",    "be",     "do",     "is",
    "in",    "it",    "of",    "he",     "on",     "to",
    "but",   "for",   "you",   "not",    "and",    "the",
    "was",   "God",   "with",  "this",   "that",   "have",
    "LORD",  "said",  "faith", "Jesus",  "waters", "devils",
    "thyself", "neighbour",
};
/*
 * Read the whole of "text.txt" into the global buffer `text`.
 *
 * Returns the number of bytes read, or -1 on any failure (missing file,
 * seek/tell failure, or out of memory).  The buffer is NUL-terminated so
 * it is safe to peek one byte past the last character when checking word
 * boundaries.  The caller owns `text` and should free() it when done.
 */
int read_text_file()
{
    FILE *file = fopen("text.txt", "r");
    if (!file)
        return -1;
    if (fseek(file, 0L, SEEK_END) != 0) {
        fclose(file);
        return -1;
    }
    long text_size = ftell(file);
    if (text_size < 0) {
        fclose(file);
        return -1;
    }
    rewind(file);
    /* +1 for the NUL terminator (the original allocation had no slack,
       so scanning text[i + 1] at the end of the buffer read OOB) */
    text = (char *)malloc((size_t)text_size + 1);
    if (!text) {
        fclose(file);
        return -1;
    }
    size_t n_read = fread(text, sizeof(char), (size_t)text_size, file);
    fclose(file);
    text[n_read] = '\0';
    return (int)n_read;
}
/* Pretty-print every keyword with its occurrence count in a boxed,
 * green-colored ASCII table.  `counters` is indexed in parallel with the
 * global `keywords` array. */
void print_results(int *counters, int number_of_keywords)
{
    printf("\033[0;32m"); /* switch terminal text to green */
    printf("\n #####################################");
    printf("\n %-11s%15s%11s", "# ", "Keyword Counter", " #");
    printf("\n #####################################");
    printf("\n # %-22s%11s #", "Keyword", "Occurrences");
    printf("\n #####################################");
    int idx = 0;
    while (idx < number_of_keywords) {
        printf("\n # %-22s%11d #", keywords[idx], counters[idx]);
        idx++;
    }
    printf("\n #####################################\n\n");
    printf("\033[0m"); /* restore default terminal color */
}
/* True when x is a character that can legally terminate a word
 * (whitespace, punctuation, or a closing delimiter). */
int isEndOfWord(char x)
{
    switch (x) {
    case ' ':
    case ',':
    case '.':
    case ':':
    case ';':
    case '!':
    case '?':
    case ')':
    case '\'':
    case '\n':
        return 1;
    default:
        return 0;
    }
}
/* True when x is a character that can legally precede the first letter
 * of a word (whitespace or an opening parenthesis). */
int isStartOfWord(char x)
{
    switch (x) {
    case ' ':
    case '\n':
    case '(':
        return 1;
    default:
        return 0;
    }
}
/*
 * Count occurrences of each keyword in text.txt, one OpenMP iteration
 * per keyword.  Usage: <program> <n_threads>.
 */
int main(int argc, char *argv[])
{
    if (argc < 2)
    {
        printf("You must type in the number of threads that you want to use!\n");
        return 0;
    }
    /* get number of threads from argv (strtol reports errors, atoi can't) */
    errno = 0;
    char *param;
    long conv = strtol(argv[1], &param, 10); /* fixed: was mojibake `¶m` */
    if (errno != 0 || *param != '\0')
    {
        printf("The first argument must be a number!\n");
        return 0;
    }
    else
        n_threads = conv;
    omp_set_num_threads(n_threads);
    int text_size = read_text_file();
    if (text_size < 0)
    {
        printf("Could not read text.txt!\n");
        return 1;
    }
    int number_of_keywords = (int)(sizeof(keywords) / sizeof(keywords[0]));
    /* one counter per keyword; each is written by exactly one iteration,
       so the parallel loop needs no synchronization */
    int counters[number_of_keywords];
    for (int i = 0; i < number_of_keywords; i++)
        counters[i] = 0;
#pragma omp parallel for
    for (int j = 0; j < number_of_keywords; j++)
    {
        char *keyword = keywords[j];
        int keyword_size = (int)strlen(keyword);
        int keyword_cursor = 0; /* chars of keyword matched so far */
        for (int i = 0; i < text_size; i++)
        {
            if (text[i] == keyword[keyword_cursor])
            {
                keyword_cursor++;
                if (keyword_cursor == keyword_size)
                {
                    /* Whole-word check: the char before the match (if any)
                       must start a word and the char after (if any) must
                       end one.  Both boundary reads are guarded so a word
                       at the very start or end of the text no longer
                       indexes out of bounds. */
                    int at_start = (i + 1 == keyword_size) ||
                                   isStartOfWord(text[i - keyword_size]);
                    int at_end = (i + 1 == text_size) ||
                                 isEndOfWord(text[i + 1]);
                    if (at_start && at_end)
                        counters[j]++;
                    keyword_cursor = 0; /* restart after a full match */
                }
            }
            else if (keyword_cursor)
            {
                /* Mismatch mid-match: restart the scan AT the current
                   character instead of skipping it (the original dropped
                   it and missed matches like "and" inside "aand"). */
                keyword_cursor = 0;
                i--;
            }
        }
    }
    print_results(counters, number_of_keywords);
    free(text);
    return 0;
}
|
kij_optimize.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int A_row;
int A_col;
int B_row;
int B_col;
/*
 * Allocate a row x col matrix of ints, zero-initialized.
 * Returns NULL on allocation failure (partially-built rows are freed).
 * Zero-initialization matters: the multiply kernel accumulates into C
 * with `+=`, which read indeterminate values under the old malloc.
 */
int **constructMatrix(int row, int col){
    int **matrix = calloc((size_t)row, sizeof(int *));
    if (!matrix)
        return NULL;
    for (int i = 0; i < row; i++){
        matrix[i] = calloc((size_t)col, sizeof(int));
        if (!matrix[i]) {
            for (int k = 0; k < i; k++)
                free(matrix[k]);
            free(matrix);
            return NULL;
        }
    }
    return matrix;
}
/* Release a matrix built by constructMatrix: each row, then the row
 * array itself.  `col` is unused but kept for interface compatibility. */
void freeMatrix(int **matrix, int row, int col){
    (void)col;
    int r = 0;
    while (r < row) {
        free(matrix[r]);
        r++;
    }
    free(matrix);
}
/*
 * kij-ordered parallel matrix multiply: C = A * B.
 * Usage: <program> A_row A_col B_row B_col n_threads
 * Reads A then B from the file "matrix", writes C to
 * "kij_optimize_result" for comparison against a golden result.
 */
int main(int argc, char *argv[]){
    if (argc < 6) {
        fprintf(stderr, "usage: %s A_row A_col B_row B_col n_threads\n", argv[0]);
        return 1;
    }
    A_row = atoi(argv[1]);
    A_col = atoi(argv[2]);
    B_row = atoi(argv[3]);
    B_col = atoi(argv[4]);
    int number_of_threads = atoi(argv[5]);
    FILE *input = fopen("matrix", "r");
    if (!input) {
        fprintf(stderr, "cannot open input file 'matrix'\n");
        return 1;
    }
    int **A = constructMatrix(A_row, A_col);
    int **B = constructMatrix(B_row, B_col);
    int **C = constructMatrix(A_row, B_col);
    /* read A */
    for (int i = 0; i < A_row; i++){
        for (int j = 0; j < A_col; j++){
            if (fscanf(input, "%d", &A[i][j]) != 1) {
                fprintf(stderr, "bad input file 'matrix'\n");
                return 1;
            }
        }
    }
    /* read B */
    for (int i = 0; i < B_row; i++){
        for (int j = 0; j < B_col; j++){
            if (fscanf(input, "%d", &B[i][j]) != 1) {
                fprintf(stderr, "bad input file 'matrix'\n");
                return 1;
            }
        }
    }
    fclose(input);
    /* Bug fix: the kernel accumulates with `+=`, but malloc'd storage is
       indeterminate -- zero C before multiplying.  (Redundant but harmless
       if constructMatrix already zeroes.) */
    for (int i = 0; i < A_row; i++)
        for (int j = 0; j < B_col; j++)
            C[i][j] = 0;
    double start_time = omp_get_wtime();
    /* kij loop order: hoist A[i][k] and stream through rows of B and C.
       Parallelizing over k means different threads may update the same
       C[i][j], hence the atomic. */
    int i, j, k;
    int temp;
#pragma omp parallel for shared(A,B,C) private(i,j,k,temp) num_threads(number_of_threads)
    for (k = 0; k < A_col; k++){
        for (i = 0; i < A_row; i++){
            temp = A[i][k];
            for (j = 0; j < B_col; j++){
#pragma omp atomic
                C[i][j] += temp * B[k][j];
            }
        }
    }
    double end_time = omp_get_wtime();
    printf("%s: %g sec.\n", "kij_optimize_runtime", end_time - start_time);
    /* output the result to compare with golden result */
    FILE *out = fopen("kij_optimize_result", "w");
    if (!out) {
        fprintf(stderr, "cannot open output file\n");
        return 1;
    }
    for (int i = 0; i < A_row; i++){
        for (int j = 0; j < B_col; j++){
            fprintf(out, "%d ", C[i][j]);
        }
        fprintf(out, "\n");
    }
    fprintf(out, "\n");
    fclose(out);
    freeMatrix(A, A_row, A_col);
    freeMatrix(B, B_row, B_col);
    freeMatrix(C, A_row, B_col);
    return 0;
} |
sgd.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "completion.h"
#include "../csf.h"
#include "../reorder.h"
#include "../util.h"
#include "../thd_info.h"
#include "../io.h"
#include <math.h>
#define USE_CSF_SGD 1
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Update a three-mode model based on a given observation.
*
* @param train The training data.
* @param nnz_index The index of the observation to update from.
* @param model The model to update.
* @param ws Workspace to use.
*/
static inline void p_update_model3(
    sptensor_t const * const train,
    idx_t const nnz_index,
    tc_model * const model,
    tc_ws * const ws)
{
  idx_t const nfactors = model->rank;
  idx_t const x = nnz_index;
  assert(train->nmodes == 3);
  idx_t * * const ind = train->ind;
  /* the three factor-matrix rows touched by nonzero x */
  val_t * const restrict arow = model->factors[0] + (ind[0][x] * nfactors);
  val_t * const restrict brow = model->factors[1] + (ind[1][x] * nfactors);
  val_t * const restrict crow = model->factors[2] + (ind[2][x] * nfactors);
  /* predict value: elementwise triple product of the three rows */
  val_t predicted = 0;
  for(idx_t f=0; f < nfactors; ++f) {
    predicted += arow[f] * brow[f] * crow[f];
  }
  /* residual of this observation under the current model */
  val_t const loss = train->vals[x] - predicted;
  val_t const rate = ws->learn_rate;
  val_t const * const restrict reg = ws->regularization;
  /* update rows: SGD step on the regularized squared error.  All three
     mod* terms must be computed from the PRE-update row values before any
     row is written -- do not reorder the statements in this loop body. */
  for(idx_t f=0; f < nfactors; ++f) {
    val_t const moda = (loss * brow[f] * crow[f]) - (reg[0] * arow[f]);
    val_t const modb = (loss * arow[f] * crow[f]) - (reg[1] * brow[f]);
    val_t const modc = (loss * arow[f] * brow[f]) - (reg[2] * crow[f]);
    arow[f] += rate * moda;
    brow[f] += rate * modb;
    crow[f] += rate * modc;
  }
}
/**
* @brief Update a three-mode model based on the i-th node of a CSF tensor.
*
* @param train The training data (in CSf format).
* @param i Which node to process.
* @param model The model to update.
* @param ws Workspace to use.
*/
static inline void p_update_model_csf3(
    splatt_csf const * const train,
    idx_t const i,
    tc_model * const model,
    tc_ws * const ws)
{
  idx_t const nfactors = model->rank;
  csf_sparsity const * const pt = train->pt;
  assert(model->nmodes == 3);
  assert(train->ntiles == 1);
  /* sparsity structure:
     sptr - slice pointers (fibers of slice i are sptr[i]..sptr[i+1]-1)
     fptr - fiber pointers (nnz of fiber f are fptr[f]..fptr[f+1]-1)
     fids - second-mode index of each fiber
     inds - third-mode index of each nonzero */
  idx_t const * const restrict sptr = pt->fptr[0];
  idx_t const * const restrict fptr = pt->fptr[1];
  idx_t const * const restrict fids = pt->fids[1];
  idx_t const * const restrict inds = pt->fids[2];
  /* current model: dim_perm maps CSF mode order back to tensor modes */
  val_t const * const restrict vals = pt->vals;
  val_t * const restrict avals = model->factors[train->dim_perm[0]];
  val_t * const restrict bvals = model->factors[train->dim_perm[1]];
  val_t * const restrict cvals = model->factors[train->dim_perm[2]];
  val_t const rate = ws->learn_rate;
  val_t const areg = ws->regularization[train->dim_perm[0]];
  val_t const breg = ws->regularization[train->dim_perm[1]];
  val_t const creg = ws->regularization[train->dim_perm[2]];
  /* grab the top-level row (fids[0] == NULL means slices are dense/identity) */
  idx_t const a_id = (pt->fids[0] == NULL) ? i : pt->fids[0][i];
  val_t * const restrict arow = avals + (a_id * nfactors);
  /* process each fiber */
  for(idx_t fib=sptr[i]; fib < sptr[i+1]; ++fib) {
    val_t * const restrict brow = bvals + (fids[fib] * nfactors);
    /* foreach nnz in fiber */
    for(idx_t jj=fptr[fib]; jj < fptr[fib+1]; ++jj) {
      val_t * const restrict crow = cvals + (inds[jj] * nfactors);
      /* compute the loss: residual = value - <arow, brow, crow> */
      val_t loss = vals[jj];
      for(idx_t f=0; f < nfactors; ++f) {
        loss -= arow[f] * brow[f] * crow[f];
      }
      /* update model */
      for(idx_t f=0; f < nfactors; ++f) {
        /* compute all modifications FIRST since we are updating all rows */
        val_t const moda = (loss * brow[f] * crow[f]) - (areg * arow[f]);
        val_t const modb = (loss * arow[f] * crow[f]) - (breg * brow[f]);
        val_t const modc = (loss * arow[f] * brow[f]) - (creg * crow[f]);
        arow[f] += rate * moda;
        brow[f] += rate * modb;
        crow[f] += rate * modc;
      }
    }
  } /* foreach fiber */
}
/**
* @brief Update a model based on a given observation.
*
* @param train The training data.
* @param nnz_index The index of the observation to update from.
* @param model The model to update.
* @param ws Workspace to use.
*/
/*
 * Perform one SGD update from a single training nonzero, handling tensors
 * of arbitrary order. Three-mode tensors are forwarded to the specialized
 * kernel p_update_model3().
 *
 * train     - the training tensor (coordinate format)
 * nnz_index - index of the observed entry to update from
 * model     - factor matrices, updated in place
 * ws        - workspace (per-thread scratch, learning rate, regularization)
 */
static void p_update_model(
    sptensor_t const * const train,
    idx_t const nnz_index,
    tc_model * const model,
    tc_ws * const ws)
{
  idx_t const nmodes = train->nmodes;

  /* third-order tensors have a dedicated fast path */
  if(nmodes == 3) {
    p_update_model3(train, nnz_index, model, ws);
    return;
  }

  idx_t const nfactors = model->rank;
  idx_t const x = nnz_index;
  val_t * const restrict buffer = ws->thds[splatt_omp_get_thread_num()].scratch[0];

  /* residual at observation x under the current model */
  val_t const err = train->vals[x] - tc_predict_val(model, train, x, buffer);

  idx_t * * const ind = train->ind;
  val_t const rate = ws->learn_rate;

  /* gradient step on each mode's factor row */
  for(idx_t m=0; m < nmodes; ++m) {
    /* seed the buffer with the row of the first "other" mode... */
    idx_t const first = (m + 1) % nmodes;
    val_t const * const restrict seed_row = model->factors[first] +
        (ind[first][x] * nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buffer[f] = seed_row[f];
    }

    /* ...then accumulate the Hadamard product of the remaining modes */
    for(idx_t off=2; off < nmodes; ++off) {
      idx_t const madj = (m + off) % nmodes;
      val_t const * const restrict other_row = model->factors[madj] +
          (ind[madj][x] * nfactors);
      for(idx_t f=0; f < nfactors; ++f) {
        buffer[f] *= other_row[f];
      }
    }

    /* regularized gradient step on this mode's row */
    val_t * const restrict update_row = model->factors[m] +
        (ind[m][x] * nfactors);
    val_t const reg = ws->regularization[m];
    for(idx_t f=0; f < nfactors; ++f) {
      update_row[f] += rate * ((err * buffer[f]) - (reg * update_row[f]));
    }
  }
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/**
 * @brief Train a tensor-completion model with stochastic gradient descent,
 *        using the "bold driver" heuristic to adapt the learning rate.
 *
 * Compile-time switch USE_CSF_SGD selects the data layout:
 *   - CSF path: train is compiled into a single untiled CSF and each epoch
 *     iterates over a shuffled permutation of slices (p_update_model_csf3);
 *   - COO path: each epoch iterates over a shuffled permutation of nonzeros
 *     (p_update_model).
 * Updates run in parallel without locks (hogwild-style).
 *
 * Fix: removed the unused local `nfactors` (declared but never read).
 *
 * @param train    The training tensor (also used for loss evaluation).
 * @param validate Validation tensor monitored by tc_converge().
 * @param model    The model to train; factors are updated in place.
 * @param ws       Workspace: learning rate, regularization, timers, etc.
 */
void splatt_tc_sgd(
    sptensor_t * train,
    sptensor_t const * const validate,
    tc_model * const model,
    tc_ws * const ws)
{
#if USE_CSF_SGD
  /* convert training data to a single CSF */
  double * opts = splatt_default_opts();
  opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE;
  splatt_csf * csf = splatt_malloc(sizeof(*csf));
  csf_alloc_mode(train, CSF_SORTED_BIGFIRST, 0, csf, opts);
  assert(csf->ntiles == 1);

  /* identity permutation over slices; reshuffled every epoch */
  idx_t const nslices = csf[0].pt->nfibs[0];
  idx_t * perm_i = splatt_malloc(nslices * sizeof(*perm_i));
  for(idx_t n=0; n < nslices; ++n) {
    perm_i[n] = n;
  }
#else
  /* identity permutation over nonzeros; reshuffled every epoch */
  idx_t * perm = splatt_malloc(train->nnz * sizeof(*perm));
  for(idx_t n=0; n < train->nnz; ++n) {
    perm[n] = n;
  }
#endif

  /* initial objective, reported as epoch 0 */
  val_t loss = tc_loss_sq(train, model, ws);
  val_t frobsq = tc_frob_sq(model, ws);
  tc_converge(train, validate, model, loss, frobsq, 0, ws);

  /* for bold driver */
  val_t obj = loss + frobsq;
  val_t prev_obj = obj;

  timer_start(&ws->tc_time);

  /* foreach epoch */
  for(idx_t e=1; e < ws->max_its+1; ++e) {
    /* update model from all training observations */
#if USE_CSF_SGD
    shuffle_idx(perm_i, nslices);
    #pragma omp parallel for schedule(dynamic, 16)
    for(idx_t i=0; i < nslices; ++i) {
      p_update_model_csf3(csf, perm_i[i], model, ws);
    }
#else
    shuffle_idx(perm, train->nnz);
    #pragma omp parallel for schedule(static, 1)
    for(idx_t n=0; n < train->nnz; ++n) {
      p_update_model(train, perm[n], model, ws);
    }
#endif

    /* compute RMSE and adjust learning rate */
    loss = tc_loss_sq(train, model, ws);
    frobsq = tc_frob_sq(model, ws);
    obj = loss + frobsq;
    if(tc_converge(train, validate, model, loss, frobsq, e, ws)) {
      break;
    }

    /* bold driver: grow the rate on improvement, halve it otherwise */
    if(e > 1) {
      if(obj < prev_obj) {
        ws->learn_rate *= 1.05;
      } else {
        ws->learn_rate *= 0.50;
      }
    }

    prev_obj = obj;
  }

#if USE_CSF_SGD
  splatt_free(perm_i);
  csf_free_mode(csf);
  splatt_free(csf);
  splatt_free_opts(opts);
#else
  splatt_free(perm);
#endif
}
|
convolution_1x1_pack8to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 1x1 stride-1 convolution: with a 1x1 kernel, im2col is just a reshape of
// the input to a single row of w*h columns, so reinterpret the blob in place
// and hand it to the packed sgemm kernel.
static void conv1x1s1_sgemm_pack8to4_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // shallow copy shares data; only the logical shape changes
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = bottom_blob.w * bottom_blob.h;
    bottom_im2col.h = 1;

    im2col_sgemm_pack8to4_avx(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution: gather the stride-2 sample grid into a dense
// temporary blob, then reuse the stride-1 sgemm path on it.
static void conv1x1s2_sgemm_pack8to4_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // pack8 elements to skip after each output row: the unread remainder of
    // the current input row plus the entire next (skipped) input row
    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* sptr = bottom_blob.channel(q);
        float* dptr = bottom_blob_shrinked.channel(q);

        for (int y = 0; y < outh; y++)
        {
            for (int x = 0; x < outw; x++)
            {
                _mm256_store_ps(dptr, _mm256_load_ps(sptr));
                sptr += 16; // advance two pack8 pixels (keep one, skip one)
                dptr += 8;
            }
            sptr += tailstep;
        }
    }

    conv1x1s1_sgemm_pack8to4_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
findSubGraphs.c | #include "defs.h"
/*
 * From each high-weight edge in maxIntWtList, run a path-length-limited
 * BFS (depth SubGraphPathLength) over G and report to stderr how many
 * vertices each search reaches.  Returns the elapsed wall-clock time.
 *
 * NOTE(review): S (visit order), start[] (phase offsets), visited[] and
 * pSCount[] are shared across threads; thread 0 owns their (re)initialization
 * and the barriers below order those hand-offs.
 */
double findSubGraphs(graph* G,
        edge* maxIntWtList, int maxIntWtListSize) {
    VERT_T* S;            /* shared visit order; frontier of phase p is S[start[p]..start[p+1]) */
    LONG_T *start;        /* start[p] = offset of phase p's frontier in S */
    char* visited;        /* per-vertex visited flags, reset per search */
    LONG_T *pSCount;      /* per-thread discovery counts, prefix-summed by thread 0 */
#ifdef _OPENMP
    omp_lock_t* vLock;    /* one lock per vertex, guards visited[] updates */
#endif
    LONG_T phase_num, numPhases;
    LONG_T count;
    double elapsed_time = get_seconds();

    numPhases = SubGraphPathLength + 1;

#ifdef _OPENMP
    omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
#endif
    {
        VERT_T *pS, *pSt;
        LONG_T pCount, pS_size;
        LONG_T v, w, search_num;
        int tid, nthreads;
        LONG_T j, k, vert, n;
#ifdef _OPENMP
        LONG_T i;
        tid = omp_get_thread_num();
        nthreads = omp_get_num_threads();
#else
        tid = 0;
        nthreads = 1;
#endif

        n = G->n;
        /* thread-local buffer for newly discovered vertices; doubled on demand */
        pS_size = n/nthreads + 1;
        pS = (VERT_T *) malloc(pS_size*sizeof(VERT_T));
        assert(pS != NULL);

        /* thread 0 allocates all shared structures */
        if (tid == 0) {
            S = (VERT_T *) malloc(n*sizeof(VERT_T));
            visited = (char *) calloc(n, sizeof(char));
            start = (LONG_T *) calloc((numPhases+2), sizeof(LONG_T));
            pSCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T));
#ifdef _OPENMP
            vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++) {
            omp_init_lock(&vLock[i]);
        }
#endif

        /* one bounded BFS per selected edge */
        for (search_num=0; search_num<maxIntWtListSize; search_num++) {
#ifdef _OPENMP
#pragma omp barrier
#endif
            /* Run path-limited BFS in parallel */
            if (tid == 0) {
                /* reset visited[] and seed the frontier with both edge endpoints */
                free(visited);
                visited = (char *) calloc(n, sizeof(char));
                S[0] = maxIntWtList[search_num].startVertex;
                S[1] = maxIntWtList[search_num].endVertex;
                visited[S[0]] = (char) 1;
                visited[S[1]] = (char) 1;
                count = 2;
                phase_num = 1;
                start[0] = 0;
                start[1] = 1;
                start[2] = 2;
            }
#ifdef _OPENMP
#pragma omp barrier
#endif
            while (phase_num <= SubGraphPathLength) {
                pCount = 0;
#ifdef _OPENMP
#pragma omp for
#endif
                /* expand every vertex of the current phase's frontier */
                for (vert=start[phase_num]; vert<start[phase_num+1]; vert++) {
                    v = S[vert];
                    for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
                        w = G->endV[j];
                        if (v == w)
                            continue;
#ifdef _OPENMP
                        /* non-blocking: if another thread holds w's lock,
                           it is already claiming w, so skip it */
                        int myLock = omp_test_lock(&vLock[w]);
                        if (myLock) {
#endif
                            if (visited[w] != (char) 1) {
                                visited[w] = (char) 1;
                                if (pCount == pS_size) {
                                    /* Resize pS */
                                    pSt = (VERT_T *)
                                        malloc(2*pS_size*sizeof(VERT_T));
                                    memcpy(pSt, pS, pS_size*sizeof(VERT_T));
                                    free(pS);
                                    pS = pSt;
                                    pS_size = 2*pS_size;
                                }
                                pS[pCount++] = w;
                            }
#ifdef _OPENMP
                            omp_unset_lock(&vLock[w]);
                        }
#endif
                    }
                }
#ifdef _OPENMP
#pragma omp barrier
#endif
                pSCount[tid+1] = pCount;
#ifdef _OPENMP
#pragma omp barrier
#endif
                if (tid == 0) {
                    /* prefix-sum the per-thread counts to obtain each
                       thread's write offset into S for the next phase */
                    pSCount[0] = start[phase_num+1];
                    for(k=1; k<=nthreads; k++) {
                        pSCount[k] = pSCount[k-1] + pSCount[k];
                    }
                    start[phase_num+2] = pSCount[nthreads];
                    count = pSCount[nthreads];
                    phase_num++;
                }
#ifdef _OPENMP
#pragma omp barrier
#endif
                /* scatter this thread's discoveries into the shared order S */
                for (k = pSCount[tid]; k < pSCount[tid+1]; k++) {
                    S[k] = pS[k-pSCount[tid]];
                }
#ifdef _OPENMP
#pragma omp barrier
#endif
            } /* End of search */

            if (tid == 0) {
                fprintf(stderr, "Search from <%ld, %ld>, number of vertices visited:"
                        " %ld\n", (long) S[0], (long) S[1], (long) count);
            }
        } /* End of outer loop */

        free(pS);

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++) {
            omp_destroy_lock(&vLock[i]);
        }
#pragma omp barrier
#endif

        if (tid == 0) {
            /* free(S); PHJK: crashes under simplescalar */
            free(start);
            free(visited);
            free(pSCount);
#ifdef _OPENMP
            free(vLock);
#endif
        }
#ifdef _OPENMP
#endif
    }

    elapsed_time = get_seconds() - elapsed_time;
    return elapsed_time;
}
|
d3q15.h | //*****************************************************************************
// Title : src/particle/d3q15.h
// Author : Tanabe Yuta
// Date : 2020/10/29
// Copyright : (C)2020 TanabeYuta
//*****************************************************************************
#pragma once
#include <cassert>
#ifdef _USE_MPI_DEFINES
#include "mpi.h"
#endif
#ifdef _USE_AVX_DEFINES
#include <immintrin.h>
#endif
namespace PANSLBM2 {
namespace {
    // Boundary-condition tags compared against the values returned by the
    // user-supplied _bctype functors.  Presumably BARRIER selects a
    // bounce-back wall and MIRROR a specular reflection — TODO confirm
    // against the BoundaryCondition()/iBoundaryCondition() implementations.
    const int BARRIER = 1;
    const int MIRROR = 2;
}
/*
 * D3Q15 lattice-Boltzmann distribution container for one MPI subdomain.
 * T is the floating-point type of the distribution functions.  The global
 * lx*ly*lz lattice is split over an mx*my*mz process grid; this object
 * holds the local nx*ny*nz block plus send/receive buffers for exchanging
 * boundary populations with the 6 face, 12 edge and 8 corner neighbors.
 */
template<class T>
class D3Q15 {
public:
    D3Q15() = delete;
    // _lx,_ly,_lz : global lattice size; _PEid : this process's rank;
    // _mx,_my,_mz : number of PEs along each axis.
    D3Q15(int _lx, int _ly, int _lz, int _PEid = 0, int _mx = 1, int _my = 1, int _mz = 1) :
        lx(_lx), ly(_ly), lz(_lz), PEid(_PEid), mx(_mx), my(_my), mz(_mz),
        // coordinates of this PE within the process grid (x fastest)
        PEx(this->PEid%this->mx), PEy((this->PEid/this->mx)%this->my), PEz(this->PEid/(this->mx*this->my)),
        // local extent; remainder cells go to the higher-coordinate PEs
        nx((this->lx + this->PEx)/this->mx), ny((this->ly + this->PEy)/this->my), nz((this->lz + this->PEz)/this->mz),
        nxyz(this->nx*this->ny*this->nz),
        // global index of this subdomain's first cell along each axis
        offsetx(this->mx - this->PEx > this->lx%this->mx ? this->PEx*this->nx : this->lx - (this->mx - this->PEx)*this->nx),
        offsety(this->my - this->PEy > this->ly%this->my ? this->PEy*this->ny : this->ly - (this->my - this->PEy)*this->ny),
        offsetz(this->mz - this->PEz > this->lz%this->mz ? this->PEz*this->nz : this->lz - (this->mz - this->PEz)*this->nz)
    {
        assert(0 < _lx && 0 < _ly && 0 < _lz && 0 <= _PEid && 0 < _mx && 0 < _my && 0 < _mz);

        // f0 stores the rest population (c = 0); f/fnext store the 14 moving
        // populations per cell (see IndexF for the interleaved layout).
#ifdef _USE_AVX_DEFINES
        // 32-byte alignment for AVX loads/stores
        this->f0 = (T*)_mm_malloc(sizeof(T)*this->nxyz, 32);
        this->f = (T*)_mm_malloc(sizeof(T)*this->nxyz*(D3Q15<T>::nc - 1), 32);
        this->fnext = (T*)_mm_malloc(sizeof(T)*this->nxyz*(D3Q15<T>::nc - 1), 32);
#else
        this->f0 = new T[this->nxyz];
        this->f = new T[this->nxyz*(D3Q15<T>::nc - 1)];
        this->fnext = new T[this->nxyz*(D3Q15<T>::nc - 1)];
#endif

        // face halo buffers: 5 populations cross each face per cell
        this->fsend_xmin = new T[this->ny*this->nz*5];
        this->fsend_xmax = new T[this->ny*this->nz*5];
        this->fsend_ymin = new T[this->nz*this->nx*5];
        this->fsend_ymax = new T[this->nz*this->nx*5];
        this->fsend_zmin = new T[this->nx*this->ny*5];
        this->fsend_zmax = new T[this->nx*this->ny*5];
        this->frecv_xmin = new T[this->ny*this->nz*5];
        this->frecv_xmax = new T[this->ny*this->nz*5];
        this->frecv_ymin = new T[this->nz*this->nx*5];
        this->frecv_ymax = new T[this->nz*this->nx*5];
        this->frecv_zmin = new T[this->nx*this->ny*5];
        this->frecv_zmax = new T[this->nx*this->ny*5];

        // edge halo buffers: 2 populations cross each edge per cell
        this->fsend_ymin_zmin = new T[this->nx*2];
        this->fsend_ymin_zmax = new T[this->nx*2];
        this->fsend_ymax_zmin = new T[this->nx*2];
        this->fsend_ymax_zmax = new T[this->nx*2];
        this->frecv_ymin_zmin = new T[this->nx*2];
        this->frecv_ymin_zmax = new T[this->nx*2];
        this->frecv_ymax_zmin = new T[this->nx*2];
        this->frecv_ymax_zmax = new T[this->nx*2];
        this->fsend_zmin_xmin = new T[this->ny*2];
        this->fsend_zmin_xmax = new T[this->ny*2];
        this->fsend_zmax_xmin = new T[this->ny*2];
        this->fsend_zmax_xmax = new T[this->ny*2];
        this->frecv_zmin_xmin = new T[this->ny*2];
        this->frecv_zmin_xmax = new T[this->ny*2];
        this->frecv_zmax_xmin = new T[this->ny*2];
        this->frecv_zmax_xmax = new T[this->ny*2];
        this->fsend_xmin_ymin = new T[this->nz*2];
        this->fsend_xmin_ymax = new T[this->nz*2];
        this->fsend_xmax_ymin = new T[this->nz*2];
        this->fsend_xmax_ymax = new T[this->nz*2];
        this->frecv_xmin_ymin = new T[this->nz*2];
        this->frecv_xmin_ymax = new T[this->nz*2];
        this->frecv_xmax_ymin = new T[this->nz*2];
        this->frecv_xmax_ymax = new T[this->nz*2];

#ifdef _USE_AVX_DEFINES
        // broadcast the lattice constants into AVX registers once
        D3Q15<T>::LoadCxCyCzEi();
#endif
    }
    D3Q15(const D3Q15<T>& _p) = delete;

    // Releases the distribution arrays and all halo buffers.
    ~D3Q15() {
#ifdef _USE_AVX_DEFINES
        _mm_free(this->f0);
        _mm_free(this->f);
        _mm_free(this->fnext);
#else
        delete[] this->f0;
        delete[] this->f;
        delete[] this->fnext;
#endif
        delete[] this->fsend_xmin;
        delete[] this->fsend_xmax;
        delete[] this->fsend_ymin;
        delete[] this->fsend_ymax;
        delete[] this->fsend_zmin;
        delete[] this->fsend_zmax;
        delete[] this->frecv_xmin;
        delete[] this->frecv_xmax;
        delete[] this->frecv_ymin;
        delete[] this->frecv_ymax;
        delete[] this->frecv_zmin;
        delete[] this->frecv_zmax;
        delete[] this->fsend_ymin_zmin;
        delete[] this->fsend_ymin_zmax;
        delete[] this->fsend_ymax_zmin;
        delete[] this->fsend_ymax_zmax;
        delete[] this->frecv_ymin_zmin;
        delete[] this->frecv_ymin_zmax;
        delete[] this->frecv_ymax_zmin;
        delete[] this->frecv_ymax_zmax;
        delete[] this->fsend_zmin_xmin;
        delete[] this->fsend_zmin_xmax;
        delete[] this->fsend_zmax_xmin;
        delete[] this->fsend_zmax_xmax;
        delete[] this->frecv_zmin_xmin;
        delete[] this->frecv_zmin_xmax;
        delete[] this->frecv_zmax_xmin;
        delete[] this->frecv_zmax_xmax;
        delete[] this->fsend_xmin_ymin;
        delete[] this->fsend_xmin_ymax;
        delete[] this->fsend_xmax_ymin;
        delete[] this->fsend_xmax_ymax;
        delete[] this->frecv_xmin_ymin;
        delete[] this->frecv_xmin_ymax;
        delete[] this->frecv_xmax_ymin;
        delete[] this->frecv_xmax_ymax;
    }

    // Linear cell index with periodic wrap: -1 and nx/ny/nz map to the
    // opposite side of the local subdomain (x fastest, then y, then z).
    int Index(int _i, int _j, int _k) const {
        int i = _i == -1 ? this->nx - 1 : (_i == this->nx ? 0 : _i);
        int j = _j == -1 ? this->ny - 1 : (_j == this->ny ? 0 : _j);
        int k = _k == -1 ? this->nz - 1 : (_k == this->nz ? 0 : _k);
        return i + this->nx*j + this->nx*this->ny*k;
    }
    // Index into f/fnext for cell _idx and moving direction _c (1..nc-1);
    // direction 0 lives in the separate f0 array.
    static int IndexF(int _idx, int _c) {
        return (D3Q15<T>::nc - 1)*_idx + (_c - 1);
    }
    // Rank of the PE at grid position (_i,_j,_k), wrapping periodically.
    int IndexPE(int _i, int _j, int _k) const {
        int i = _i == -1 ? this->mx - 1 : (_i == this->mx ? 0 : _i);
        int j = _j == -1 ? this->my - 1 : (_j == this->my ? 0 : _j);
        int k = _k == -1 ? this->mz - 1 : (_k == this->mz ? 0 : _k);
        return i + this->mx*j + this->mx*this->my*k;
    }
    // Index into an x-face halo buffer for cell (_j,_k) of that face.
    int IndexBCx(int _j, int _k) const {
        return _j + this->ny*_k;
    }
    // Index into a y-face halo buffer for cell (_k,_i) of that face.
    int IndexBCy(int _k, int _i) const {
        return _k + this->nz*_i;
    }
    // Index into a z-face halo buffer for cell (_i,_j) of that face.
    int IndexBCz(int _i, int _j) const {
        return _i + this->nx*_j;
    }

    void Stream();                      // forward streaming step
    void iStream();                     // streaming step — presumably the adjoint/inverse pass; confirm in definition
    template<class Ff>
    void BoundaryCondition(Ff _bctype); // apply BCs selected by _bctype functor
    template<class Ff>
    void iBoundaryCondition(Ff _bctype);
    void SmoothCorner();

    const int lx, ly, lz, PEid, mx, my, mz, PEx, PEy, PEz, nx, ny, nz, nxyz, offsetx, offsety, offsetz;
    static const int nc = 15, nd = 3, cx[nc], cy[nc], cz[nc];  // lattice velocities
    static const T ei[nc];                                     // equilibrium weights
    T *f0, *f;  // rest population / moving populations

#ifdef _USE_AVX_DEFINES
    static const int packsize = 32/sizeof(T);
    static __m256d __cx[nc], __cy[nc], __cz[nc], __ei[nc]; // If you use any type except double, cast these values.
    static void LoadCxCyCzEi();
    template<class mmT>
    void LoadF(int _idx, mmT *__f);
    template<class mmT>
    void StoreF(int _idx, const mmT *__f);
#endif

private:
    T *fnext;  // double-buffer partner of f, swapped after each Stream()
    // halo exchange buffers: faces (5 per cell), edges (2 per cell), corners (1 each)
    T *fsend_xmin, *fsend_xmax, *fsend_ymin, *fsend_ymax, *fsend_zmin, *fsend_zmax, *frecv_xmin, *frecv_xmax, *frecv_ymin, *frecv_ymax, *frecv_zmin, *frecv_zmax;
    T *fsend_ymin_zmin, *fsend_ymin_zmax, *fsend_ymax_zmin, *fsend_ymax_zmax, *frecv_ymin_zmin, *frecv_ymin_zmax, *frecv_ymax_zmin, *frecv_ymax_zmax;
    T *fsend_zmin_xmin, *fsend_zmin_xmax, *fsend_zmax_xmin, *fsend_zmax_xmax, *frecv_zmin_xmin, *frecv_zmin_xmax, *frecv_zmax_xmin, *frecv_zmax_xmax;
    T *fsend_xmin_ymin, *fsend_xmin_ymax, *fsend_xmax_ymin, *fsend_xmax_ymax, *frecv_xmin_ymin, *frecv_xmin_ymax, *frecv_xmax_ymin, *frecv_xmax_ymax;
    T fsend_corner[8], frecv_corner[8];
#ifdef _USE_MPI_DEFINES
    // 52 = 2*(6 faces + 12 edges + 8 corners) nonblocking requests
    MPI_Status status[52];
    MPI_Request request[52];
#endif
};
// D3Q15 lattice velocity set: index 0 is the rest particle, 1-6 the six
// axis-aligned directions, 7-14 the eight cube diagonals.  ei are the
// matching equilibrium weights (2/9 rest, 1/9 axis, 1/72 diagonal).
template<class T>const int D3Q15<T>::cx[D3Q15<T>::nc] = { 0, 1, 0, 0, -1, 0, 0, 1, -1, 1, 1, -1, 1, -1, -1 };
template<class T>const int D3Q15<T>::cy[D3Q15<T>::nc] = { 0, 0, 1, 0, 0, -1, 0, 1, 1, -1, 1, -1, -1, 1, -1 };
template<class T>const int D3Q15<T>::cz[D3Q15<T>::nc] = { 0, 0, 0, 1, 0, 0, -1, 1, 1, 1, -1, -1, -1, -1, 1 };
template<class T>const T D3Q15<T>::ei[D3Q15<T>::nc] = { 2.0/9.0, 1.0/9.0, 1.0/9.0, 1.0/9.0, 1.0/9.0, 1.0/9.0, 1.0/9.0, 1.0/72.0, 1.0/72.0, 1.0/72.0, 1.0/72.0, 1.0/72.0, 1.0/72.0, 1.0/72.0, 1.0/72.0 };
template<class T>
void D3Q15<T>::Stream() {
// Stream
#pragma omp parallel for
for (int k = 0; k < this->nz; ++k) {
for (int j = 0; j < this->ny; ++j) {
for (int i = 0; i < this->nx; ++i) {
int idx = this->Index(i, j, k);
for (int c = 1; c < D3Q15<T>::nc; ++c) {
int idxstream = this->Index(i - D3Q15<T>::cx[c], j - D3Q15<T>::cy[c], k - D3Q15<T>::cz[c]);
this->fnext[D3Q15<T>::IndexF(idx, c)] = this->f[D3Q15<T>::IndexF(idxstream, c)];
}
}
}
}
// Swap
T *tmp = this->f;
this->f = this->fnext;
this->fnext = tmp;
#ifdef _USE_MPI_DEFINES
int idx, idxface;
// Copy from f to fsend along edge or at corner
if (this->mx != 1) {
for (int j = 0; j < this->ny; ++j) {
for (int k = 0; k < this->nz; ++k) {
// Face on xmin
idx = this->Index(this->nx - 1, j, k);
idxface = this->IndexBCx(j, k);
this->fsend_xmin[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 4)];
this->fsend_xmin[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 8)];
this->fsend_xmin[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 11)];
this->fsend_xmin[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 13)];
this->fsend_xmin[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 14)];
// Face on xmax
idx = this->Index(0, j, k);
idxface = this->IndexBCx(j, k);
this->fsend_xmax[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 1)];
this->fsend_xmax[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->fsend_xmax[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 9)];
this->fsend_xmax[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 10)];
this->fsend_xmax[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 12)];
}
}
}
if (this->my != 1) {
for (int k = 0; k < this->nz; ++k) {
for (int i = 0; i < this->nx; ++i) {
// Face on ymin
idx = this->Index(i, this->ny - 1, k);
idxface = this->IndexBCy(k, i);
this->fsend_ymin[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 5)];
this->fsend_ymin[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 9)];
this->fsend_ymin[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 11)];
this->fsend_ymin[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 12)];
this->fsend_ymin[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 14)];
// Face on ymax
idx = this->Index(i, 0, k);
idxface = this->IndexBCy(k, i);
this->fsend_ymax[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 2)];
this->fsend_ymax[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->fsend_ymax[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 8)];
this->fsend_ymax[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 10)];
this->fsend_ymax[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 13)];
}
}
}
if (this->mz != 1) {
for (int i = 0; i < this->nx; ++i) {
for (int j = 0; j < this->ny; ++j) {
// Face on zmin
idx = this->Index(i, j, this->nz - 1);
idxface = this->IndexBCz(i, j);
this->fsend_zmin[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 6)];
this->fsend_zmin[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 10)];
this->fsend_zmin[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 11)];
this->fsend_zmin[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 12)];
this->fsend_zmin[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 13)];
// Face on zmax
idx = this->Index(i, j, 0);
idxface = this->IndexBCz(i, j);
this->fsend_zmax[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 3)];
this->fsend_zmax[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->fsend_zmax[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 8)];
this->fsend_zmax[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 9)];
this->fsend_zmax[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 14)];
}
}
}
if (this->my != 1 || this->mz != 1) {
for (int i = 0; i < this->nx; ++i) {
// Edge on ymin and zmin
idx = this->Index(i, this->ny - 1, this->nz - 1);
this->fsend_ymin_zmin[i*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 11)];
this->fsend_ymin_zmin[i*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 12)];
// Edge on ymin and zmax
idx = this->Index(i, this->ny - 1, 0);
this->fsend_ymin_zmax[i*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 9)];
this->fsend_ymin_zmax[i*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 14)];
// Edge on ymax and zmin
idx = this->Index(i, 0, this->nz - 1);
this->fsend_ymax_zmin[i*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 10)];
this->fsend_ymax_zmin[i*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 13)];
// Edge on ymax and zmax
idx = this->Index(i, 0, 0);
this->fsend_ymax_zmax[i*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->fsend_ymax_zmax[i*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 8)];
}
}
if (this->mz != 1 || this->mx != 1) {
for (int j = 0; j < this->ny; ++j) {
// Edge on zmin and xmin
idx = this->Index(this->nx - 1, j, this->nz - 1);
this->fsend_zmin_xmin[j*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 11)];
this->fsend_zmin_xmin[j*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 13)];
// Edge on zmin and xmax
idx = this->Index(0, j, this->nz - 1);
this->fsend_zmin_xmax[j*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 10)];
this->fsend_zmin_xmax[j*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 12)];
// Edge on zmax and xmin
idx = this->Index(this->nx - 1, j, 0);
this->fsend_zmax_xmin[j*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 8)];
this->fsend_zmax_xmin[j*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 14)];
// Edge on zmax and xmax
idx = this->Index(0, j, 0);
this->fsend_zmax_xmax[j*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->fsend_zmax_xmax[j*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 9)];
}
}
if (this->mx != 1 || this->my != 1) {
for (int k = 0; k < this->nz; ++k) {
// Edge on xmin and ymin
idx = this->Index(this->nx - 1, this->ny - 1, k);
this->fsend_xmin_ymin[k*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 11)];
this->fsend_xmin_ymin[k*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 14)];
// Edge on xmin and ymax
idx = this->Index(this->nx - 1, 0, k);
this->fsend_xmin_ymax[k*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 8)];
this->fsend_xmin_ymax[k*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 13)];
// Edge on xmax and ymin
idx = this->Index(0, this->ny - 1, k);
this->fsend_xmax_ymin[k*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 9)];
this->fsend_xmax_ymin[k*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 12)];
// Edge on xmax and ymax
idx = this->Index(0, 0, k);
this->fsend_xmax_ymax[k*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->fsend_xmax_ymax[k*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 10)];
}
}
if (this->mx != 1 || this->my != 1 || this->mz != 1) {
this->fsend_corner[0] = this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, this->ny - 1, this->nz - 1), 11)]; // Corner at xmin, ymin and zmin
this->fsend_corner[1] = this->f[D3Q15<T>::IndexF(this->Index(0, this->ny - 1, this->nz - 1), 12)]; // Corner at xmax, ymin and zmin
this->fsend_corner[2] = this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, 0, this->nz - 1), 13)]; // Corner at xmin, ymax and zmin
this->fsend_corner[3] = this->f[D3Q15<T>::IndexF(this->Index(0, 0, this->nz - 1), 10)]; // Corner at xmax, ymax and zmin
this->fsend_corner[4] = this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, this->ny - 1, 0), 14)]; // Corner at xmin, ymin and zmax
this->fsend_corner[5] = this->f[D3Q15<T>::IndexF(this->Index(0, this->ny - 1, 0), 9)]; // Corner at xmax, ymin and zmax
this->fsend_corner[6] = this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, 0, 0), 8)]; // Corner at xmin, ymax and zmax
this->fsend_corner[7] = this->f[D3Q15<T>::IndexF(this->Index(0, 0, 0), 7)]; // Corner at xmax, ymax and zmax
}
// Communicate with other PE
int neib = 0;
if (this->mx != 1) {
// To xmin
MPI_Isend(this->fsend_xmin, this->ny*this->nz*5, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz), 0, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_xmax, this->ny*this->nz*5, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz), 0, MPI_COMM_WORLD, &this->request[neib++]);
// To xmax
MPI_Isend(this->fsend_xmax, this->ny*this->nz*5, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz), 1, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_xmin, this->ny*this->nz*5, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz), 1, MPI_COMM_WORLD, &this->request[neib++]);
}
if (this->my != 1) {
// To ymin
MPI_Isend(this->fsend_ymin, this->nz*this->nx*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz), 2, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_ymax, this->nz*this->nx*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz), 2, MPI_COMM_WORLD, &this->request[neib++]);
// To ymax
MPI_Isend(this->fsend_ymax, this->nz*this->nx*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz), 3, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_ymin, this->nz*this->nx*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz), 3, MPI_COMM_WORLD, &this->request[neib++]);
}
if (this->mz != 1) {
// To zmin
MPI_Isend(this->fsend_zmin, this->nx*this->ny*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy, this->PEz - 1), 4, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_zmax, this->nx*this->ny*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy, this->PEz + 1), 4, MPI_COMM_WORLD, &this->request[neib++]);
// To zmax
MPI_Isend(this->fsend_zmax, this->nx*this->ny*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy, this->PEz + 1), 5, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_zmin, this->nx*this->ny*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy, this->PEz - 1), 5, MPI_COMM_WORLD, &this->request[neib++]);
}
if (this->my != 1 || this->mz != 1) {
// To ymin and zmin
MPI_Isend(this->fsend_ymin_zmin, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz - 1), 6, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_ymax_zmax, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz + 1), 6, MPI_COMM_WORLD, &this->request[neib++]);
// To ymin and zmax
MPI_Isend(this->fsend_ymin_zmax, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz + 1), 7, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_ymax_zmin, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz - 1), 7, MPI_COMM_WORLD, &this->request[neib++]);
// To ymax and zmin
MPI_Isend(this->fsend_ymax_zmin, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz - 1), 8, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_ymin_zmax, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz + 1), 8, MPI_COMM_WORLD, &this->request[neib++]);
// To ymax and zmax
MPI_Isend(this->fsend_ymax_zmax, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz + 1), 9, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_ymin_zmin, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz - 1), 9, MPI_COMM_WORLD, &this->request[neib++]);
}
if (this->mz != 1 || this->mx != 1) {
// To zmin and xmin
MPI_Isend(this->fsend_zmin_xmin, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz - 1), 10, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_zmax_xmax, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz + 1), 10, MPI_COMM_WORLD, &this->request[neib++]);
// To zmin and xmax
MPI_Isend(this->fsend_zmin_xmax, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz - 1), 11, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_zmax_xmin, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz + 1), 11, MPI_COMM_WORLD, &this->request[neib++]);
// To zmax and xmin
MPI_Isend(this->fsend_zmax_xmin, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz + 1), 12, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_zmin_xmax, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz - 1), 12, MPI_COMM_WORLD, &this->request[neib++]);
// To zmax and xmax
MPI_Isend(this->fsend_zmax_xmax, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz + 1), 13, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_zmin_xmin, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz - 1), 13, MPI_COMM_WORLD, &this->request[neib++]);
}
if (this->mx != 1 || this->my != 1) {
// To xmin and ymin
MPI_Isend(this->fsend_xmin_ymin, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz), 14, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_xmax_ymax, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz), 14, MPI_COMM_WORLD, &this->request[neib++]);
// To xmin and ymax
MPI_Isend(this->fsend_xmin_ymax, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz), 15, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_xmax_ymin, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz), 15, MPI_COMM_WORLD, &this->request[neib++]);
// To xmax and ymin
MPI_Isend(this->fsend_xmax_ymin, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz), 16, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_xmin_ymax, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz), 16, MPI_COMM_WORLD, &this->request[neib++]);
// To xmax and ymax
MPI_Isend(this->fsend_xmax_ymax, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz), 17, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(this->frecv_xmin_ymin, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz), 17, MPI_COMM_WORLD, &this->request[neib++]);
}
if (this->mx != 1 || this->my != 1 || this->mz != 1) {
MPI_Isend(&this->fsend_corner[0], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz - 1), 18, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(&this->frecv_corner[7], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz - 1), 18, MPI_COMM_WORLD, &this->request[neib++]); // Corner at xmin, ymin and zmin
MPI_Isend(&this->fsend_corner[1], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz - 1), 19, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(&this->frecv_corner[6], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz - 1), 19, MPI_COMM_WORLD, &this->request[neib++]); // Corner at xmax, ymin and zmin
MPI_Isend(&this->fsend_corner[2], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz - 1), 20, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(&this->frecv_corner[5], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz - 1), 20, MPI_COMM_WORLD, &this->request[neib++]); // Corner at xmin, ymax and zmin
MPI_Isend(&this->fsend_corner[3], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz - 1), 21, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(&this->frecv_corner[4], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz - 1), 21, MPI_COMM_WORLD, &this->request[neib++]); // Corner at xmax, ymax and zmin
MPI_Isend(&this->fsend_corner[4], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz + 1), 22, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(&this->frecv_corner[3], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz + 1), 22, MPI_COMM_WORLD, &this->request[neib++]); // Corner at xmin, ymin and zmax
MPI_Isend(&this->fsend_corner[5], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz + 1), 23, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(&this->frecv_corner[2], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz + 1), 23, MPI_COMM_WORLD, &this->request[neib++]); // Corner at xmax, ymin and zmax
MPI_Isend(&this->fsend_corner[6], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz + 1), 24, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(&this->frecv_corner[1], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz + 1), 24, MPI_COMM_WORLD, &this->request[neib++]); // Corner at xmin, ymax and zmax
MPI_Isend(&this->fsend_corner[7], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz + 1), 25, MPI_COMM_WORLD, &this->request[neib++]);
MPI_Irecv(&this->frecv_corner[0], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz + 1), 25, MPI_COMM_WORLD, &this->request[neib++]); // Corner at xmax, ymax and zmax
}
if (neib > 0) {
MPI_Waitall(neib, this->request, this->status);
}
// Copy to f from frecv along edge or at corner
if (this->mx != 1) {
for (int j = 0; j < this->ny; ++j) {
for (int k = 0; k < this->nz; ++k) {
// Face on xmin
idx = this->Index(0, j, k);
idxface = this->IndexBCx(j, k);
this->f[D3Q15<T>::IndexF(idx, 1)] = this->frecv_xmin[idxface*5 + 0];
this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_xmin[idxface*5 + 1];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_xmin[idxface*5 + 2];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_xmin[idxface*5 + 3];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_xmin[idxface*5 + 4];
// Face on xmax
idx = this->Index(this->nx - 1, j, k);
idxface = this->IndexBCx(j, k);
this->f[D3Q15<T>::IndexF(idx, 4)] = this->frecv_xmax[idxface*5 + 0];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_xmax[idxface*5 + 1];
this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_xmax[idxface*5 + 2];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_xmax[idxface*5 + 3];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_xmax[idxface*5 + 4];
}
}
}
if (this->my != 1) {
for (int k = 0; k < this->nz; ++k) {
for (int i = 0; i < this->nx; ++i) {
// Face on ymin
idx = this->Index(i, 0, k);
idxface = this->IndexBCy(k, i);
this->f[D3Q15<T>::IndexF(idx, 2)] = this->frecv_ymin[idxface*5 + 0];
this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_ymin[idxface*5 + 1];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_ymin[idxface*5 + 2];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_ymin[idxface*5 + 3];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_ymin[idxface*5 + 4];
// Face on ymax
idx = this->Index(i, this->ny - 1, k);
idxface = this->IndexBCy(k, i);
this->f[D3Q15<T>::IndexF(idx, 5)] = this->frecv_ymax[idxface*5 + 0];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_ymax[idxface*5 + 1];
this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_ymax[idxface*5 + 2];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_ymax[idxface*5 + 3];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_ymax[idxface*5 + 4];
}
}
}
if (this->mz != 1) {
for (int i = 0; i < this->nx; ++i) {
for (int j = 0; j < this->ny; ++j) {
// Face on zmin
idx = this->Index(i, j, 0);
idxface = this->IndexBCz(i, j);
this->f[D3Q15<T>::IndexF(idx, 3)] = this->frecv_zmin[idxface*5 + 0];
this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_zmin[idxface*5 + 1];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_zmin[idxface*5 + 2];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_zmin[idxface*5 + 3];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_zmin[idxface*5 + 4];
// Face on zmax
idx = this->Index(i, j, this->nz - 1);
idxface = this->IndexBCz(i, j);
this->f[D3Q15<T>::IndexF(idx, 6)] = this->frecv_zmax[idxface*5 + 0];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_zmax[idxface*5 + 1];
this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_zmax[idxface*5 + 2];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_zmax[idxface*5 + 3];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_zmax[idxface*5 + 4];
}
}
}
if (this->my != 1 || this->mz != 1) {
for (int i = 0; i < this->nx; ++i) {
// Edge on ymin and zmin
idx = this->Index(i, 0, 0);
this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_ymin_zmin[i*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_ymin_zmin[i*2 + 1];
// Edge on ymin and zmax
idx = this->Index(i, 0, this->nz - 1);
this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_ymin_zmax[i*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_ymin_zmax[i*2 + 1];
// Edge on ymax and zmin
idx = this->Index(i, this->ny - 1, 0);
this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_ymax_zmin[i*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_ymax_zmin[i*2 + 1];
// Edge on ymax and zmax
idx = this->Index(i, this->ny - 1, this->nz - 1);
this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_ymax_zmax[i*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_ymax_zmax[i*2 + 1];
}
}
if (this->mz != 1 || this->mx != 1) {
for (int j = 0; j < this->ny; ++j) {
// Edge on zmin and xmin
idx = this->Index(0, j, 0);
this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_zmin_xmin[j*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_zmin_xmin[j*2 + 1];
// Edge on zmin and xmax
idx = this->Index(this->nx - 1, j, 0);
this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_zmin_xmax[j*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_zmin_xmax[j*2 + 1];
// Edge on zmax and xmin
idx = this->Index(0, j, this->nz - 1);
this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_zmax_xmin[j*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_zmax_xmin[j*2 + 1];
// Edge on zmax and xmax
idx = this->Index(this->nx - 1, j, this->nz - 1);
this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_zmax_xmax[j*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_zmax_xmax[j*2 + 1];
}
}
if (this->mx != 1 || this->my != 1) {
for (int k = 0; k < this->nz; ++k) {
// Edge on xmin and ymin
idx = this->Index(0, 0, k);
this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_xmin_ymin[k*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_xmin_ymin[k*2 + 1];
// Edge on xmin and ymax
idx = this->Index(0, this->ny - 1, k);
this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_xmin_ymax[k*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_xmin_ymax[k*2 + 1];
// Edge on xmax and ymin
idx = this->Index(this->nx - 1, 0, k);
this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_xmax_ymin[k*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_xmax_ymin[k*2 + 1];
// Edge on xmax and ymax
idx = this->Index(this->nx - 1, this->ny - 1, k);
this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_xmax_ymax[k*2 + 0];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_xmax_ymax[k*2 + 1];
}
}
if (this->mx != 1 || this->my != 1 || this->mz != 1) {
this->f[D3Q15<T>::IndexF(this->Index(0, 0, 0), 7)] = this->frecv_corner[0]; // Corner at xmin, ymin and zmin
this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, 0, 0), 8)] = this->frecv_corner[1]; // Corner at xmax, ymin and zmin
this->f[D3Q15<T>::IndexF(this->Index(0, this->ny - 1, 0), 9)] = this->frecv_corner[2]; // Corner at xmin, ymax and zmin
this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, this->ny - 1, 0), 14)] = this->frecv_corner[3]; // Corner at xmax, ymax and zmin
this->f[D3Q15<T>::IndexF(this->Index(0, 0, this->nz - 1), 10)] = this->frecv_corner[4]; // Corner at xmin, ymin and zmax
this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, 0, this->nz - 1), 13)] = this->frecv_corner[5]; // Corner at xmax, ymin and zmax
this->f[D3Q15<T>::IndexF(this->Index(0, this->ny - 1, this->nz - 1), 12)] = this->frecv_corner[6]; // Corner at xmin, ymax and zmax
this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, this->ny - 1, this->nz - 1), 11)] = this->frecv_corner[7]; // Corner at xmax, ymax and zmax
}
#endif
}
// iStream(): one inverse ("pull") streaming step for the D3Q15 lattice.
// Each node pulls distribution component c from the neighbor offset by
// (cx[c], cy[c], cz[c]) into fnext, then f and fnext are swapped.
// When compiled with _USE_MPI_DEFINES, the ghost layers are then refreshed
// by a nonblocking halo exchange (faces, edges, and corners) with the
// neighboring PEs in the mx*my*mz process grid; the send/recv buffer
// pairings mirror those of the forward Stream() step but with the packed
// face and the target direction set reversed (pull vs. push).
// NOTE(review): the interior loop reads this->Index(i + cx, ...) at domain
// boundaries too — presumably Index() wraps or clamps out-of-range
// coordinates; confirm against its definition elsewhere in the file.
template<class T>
void D3Q15<T>::iStream() {
    // Stream
    // Pull scheme: fnext(idx, c) <- f(neighbor(idx, c), c).
    // Parallelized over the outermost (k) loop; each (i,j,k,c) writes a
    // distinct fnext entry, so iterations are independent.
#pragma omp parallel for
    for (int k = 0; k < this->nz; ++k) {
        for (int j = 0; j < this->ny; ++j) {
            for (int i = 0; i < this->nx; ++i) {
                int idx = this->Index(i, j, k);
                // c starts at 1: component 0 is the rest particle (zero
                // velocity) and needs no streaming.
                for (int c = 1; c < D3Q15<T>::nc; ++c) {
                    int idxstream = this->Index(i + D3Q15<T>::cx[c], j + D3Q15<T>::cy[c], k + D3Q15<T>::cz[c]);
                    this->fnext[D3Q15<T>::IndexF(idx, c)] = this->f[D3Q15<T>::IndexF(idxstream, c)];
                }
            }
        }
    }
    // Swap
    // Pointer swap — no data copy; fnext becomes the scratch buffer for
    // the next step.
    T *tmp = this->f;
    this->f = this->fnext;
    this->fnext = tmp;
#ifdef _USE_MPI_DEFINES
    int idx, idxface;
    // Copy from f to fsend along edge or at corner
    // Buffers are named after the neighbor they are sent toward; because
    // this is the inverse (pull) step, the data packed for the "xmin"
    // neighbor is taken from the local xmax face (nx - 1), etc. — the
    // mirror of the forward Stream() packing.
    if (this->mx != 1) {
        for (int j = 0; j < this->ny; ++j) {
            for (int k = 0; k < this->nz; ++k) {
                // Face on xmin
                // Five components per face node (directions 1,7,9,10,12 —
                // presumably the set with cx > 0; confirm against cx[]).
                idx = this->Index(this->nx - 1, j, k);
                idxface = this->IndexBCx(j, k);
                this->fsend_xmin[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 1)];
                this->fsend_xmin[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 7)];
                this->fsend_xmin[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 9)];
                this->fsend_xmin[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 10)];
                this->fsend_xmin[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 12)];
                // Face on xmax
                idx = this->Index(0, j, k);
                idxface = this->IndexBCx(j, k);
                this->fsend_xmax[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 4)];
                this->fsend_xmax[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 8)];
                this->fsend_xmax[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 11)];
                this->fsend_xmax[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 13)];
                this->fsend_xmax[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 14)];
            }
        }
    }
    if (this->my != 1) {
        for (int k = 0; k < this->nz; ++k) {
            for (int i = 0; i < this->nx; ++i) {
                // Face on ymin
                idx = this->Index(i, this->ny - 1, k);
                idxface = this->IndexBCy(k, i);
                this->fsend_ymin[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 2)];
                this->fsend_ymin[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 7)];
                this->fsend_ymin[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 8)];
                this->fsend_ymin[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 10)];
                this->fsend_ymin[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 13)];
                // Face on ymax
                idx = this->Index(i, 0, k);
                idxface = this->IndexBCy(k, i);
                this->fsend_ymax[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 5)];
                this->fsend_ymax[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 9)];
                this->fsend_ymax[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 11)];
                this->fsend_ymax[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 12)];
                this->fsend_ymax[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 14)];
            }
        }
    }
    if (this->mz != 1) {
        for (int i = 0; i < this->nx; ++i) {
            for (int j = 0; j < this->ny; ++j) {
                // Face on zmin
                idx = this->Index(i, j, this->nz - 1);
                idxface = this->IndexBCz(i, j);
                this->fsend_zmin[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 3)];
                this->fsend_zmin[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 7)];
                this->fsend_zmin[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 8)];
                this->fsend_zmin[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 9)];
                this->fsend_zmin[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 14)];
                // Face on zmax
                idx = this->Index(i, j, 0);
                idxface = this->IndexBCz(i, j);
                this->fsend_zmax[idxface*5 + 0] = this->f[D3Q15<T>::IndexF(idx, 6)];
                this->fsend_zmax[idxface*5 + 1] = this->f[D3Q15<T>::IndexF(idx, 10)];
                this->fsend_zmax[idxface*5 + 2] = this->f[D3Q15<T>::IndexF(idx, 11)];
                this->fsend_zmax[idxface*5 + 3] = this->f[D3Q15<T>::IndexF(idx, 12)];
                this->fsend_zmax[idxface*5 + 4] = this->f[D3Q15<T>::IndexF(idx, 13)];
            }
        }
    }
    // Edge packing: two diagonal components per edge node.
    if (this->my != 1 || this->mz != 1) {
        for (int i = 0; i < this->nx; ++i) {
            // Edge on ymin and zmin
            idx = this->Index(i, this->ny - 1, this->nz - 1);
            this->fsend_ymin_zmin[i*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 7)];
            this->fsend_ymin_zmin[i*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 8)];
            // Edge on ymin and zmax
            idx = this->Index(i, this->ny - 1, 0);
            this->fsend_ymin_zmax[i*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 10)];
            this->fsend_ymin_zmax[i*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 13)];
            // Edge on ymax and zmin
            idx = this->Index(i, 0, this->nz - 1);
            this->fsend_ymax_zmin[i*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 9)];
            this->fsend_ymax_zmin[i*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 14)];
            // Edge on ymax and zmax
            idx = this->Index(i, 0, 0);
            this->fsend_ymax_zmax[i*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 11)];
            this->fsend_ymax_zmax[i*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 12)];
        }
    }
    if (this->mz != 1 || this->mx != 1) {
        for (int j = 0; j < this->ny; ++j) {
            // Edge on zmin and xmin
            idx = this->Index(this->nx - 1, j, this->nz - 1);
            this->fsend_zmin_xmin[j*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 7)];
            this->fsend_zmin_xmin[j*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 9)];
            // Edge on zmin and xmax
            idx = this->Index(0, j, this->nz - 1);
            this->fsend_zmin_xmax[j*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 8)];
            this->fsend_zmin_xmax[j*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 14)];
            // Edge on zmax and xmin
            idx = this->Index(this->nx - 1, j, 0);
            this->fsend_zmax_xmin[j*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 10)];
            this->fsend_zmax_xmin[j*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 12)];
            // Edge on zmax and xmax
            idx = this->Index(0, j, 0);
            this->fsend_zmax_xmax[j*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 11)];
            this->fsend_zmax_xmax[j*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 13)];
        }
    }
    if (this->mx != 1 || this->my != 1) {
        for (int k = 0; k < this->nz; ++k) {
            // Edge on xmin and ymin
            idx = this->Index(this->nx - 1, this->ny - 1, k);
            this->fsend_xmin_ymin[k*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 7)];
            this->fsend_xmin_ymin[k*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 10)];
            // Edge on xmin and ymax
            idx = this->Index(this->nx - 1, 0, k);
            this->fsend_xmin_ymax[k*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 9)];
            this->fsend_xmin_ymax[k*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 12)];
            // Edge on xmax and ymin
            idx = this->Index(0, this->ny - 1, k);
            this->fsend_xmax_ymin[k*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 8)];
            this->fsend_xmax_ymin[k*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 13)];
            // Edge on xmax and ymax
            idx = this->Index(0, 0, k);
            this->fsend_xmax_ymax[k*2 + 0] = this->f[D3Q15<T>::IndexF(idx, 11)];
            this->fsend_xmax_ymax[k*2 + 1] = this->f[D3Q15<T>::IndexF(idx, 14)];
        }
    }
    // Corner packing: one diagonal component per corner; again taken from
    // the diagonally opposite local corner (pull direction).
    if (this->mx != 1 || this->my != 1 || this->mz != 1) {
        this->fsend_corner[0] = this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, this->ny - 1, this->nz - 1), 7)];    //  Corner at xmin, ymin and zmin
        this->fsend_corner[1] = this->f[D3Q15<T>::IndexF(this->Index(0, this->ny - 1, this->nz - 1), 8)];               //  Corner at xmax, ymin and zmin
        this->fsend_corner[2] = this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, 0, this->nz - 1), 9)];               //  Corner at xmin, ymax and zmin
        this->fsend_corner[3] = this->f[D3Q15<T>::IndexF(this->Index(0, 0, this->nz - 1), 14)];                         //  Corner at xmax, ymax and zmin
        this->fsend_corner[4] = this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, this->ny - 1, 0), 10)];              //  Corner at xmin, ymin and zmax
        this->fsend_corner[5] = this->f[D3Q15<T>::IndexF(this->Index(0, this->ny - 1, 0), 13)];                         //  Corner at xmax, ymin and zmax
        this->fsend_corner[6] = this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, 0, 0), 12)];                         //  Corner at xmin, ymax and zmax
        this->fsend_corner[7] = this->f[D3Q15<T>::IndexF(this->Index(0, 0, 0), 11)];                                    //  Corner at xmax, ymax and zmax
    }
    // Communicate with other PE
    // Nonblocking exchange with up to 26 neighbors (6 faces, 12 edges,
    // 8 corners); each send/recv pair shares a unique tag (0-25).
    // neib counts posted requests for the single MPI_Waitall below.
    // NOTE(review): send and recv to the SAME neighbor rank share one tag
    // per direction pair — presumably IndexPE wraps the PE grid
    // periodically so partner ranks always exist; confirm.
    int neib = 0;
    if (this->mx != 1) {
        // To xmin
        MPI_Isend(this->fsend_xmin, this->ny*this->nz*5, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz), 0, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_xmax, this->ny*this->nz*5, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz), 0, MPI_COMM_WORLD, &this->request[neib++]);
        // To xmax
        MPI_Isend(this->fsend_xmax, this->ny*this->nz*5, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz), 1, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_xmin, this->ny*this->nz*5, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz), 1, MPI_COMM_WORLD, &this->request[neib++]);
    }
    if (this->my != 1) {
        // To ymin
        MPI_Isend(this->fsend_ymin, this->nz*this->nx*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz), 2, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_ymax, this->nz*this->nx*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz), 2, MPI_COMM_WORLD, &this->request[neib++]);
        // To ymax
        MPI_Isend(this->fsend_ymax, this->nz*this->nx*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz), 3, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_ymin, this->nz*this->nx*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz), 3, MPI_COMM_WORLD, &this->request[neib++]);
    }
    if (this->mz != 1) {
        // To zmin
        MPI_Isend(this->fsend_zmin, this->nx*this->ny*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy, this->PEz - 1), 4, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_zmax, this->nx*this->ny*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy, this->PEz + 1), 4, MPI_COMM_WORLD, &this->request[neib++]);
        // To zmax
        MPI_Isend(this->fsend_zmax, this->nx*this->ny*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy, this->PEz + 1), 5, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_zmin, this->nx*this->ny*5, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy, this->PEz - 1), 5, MPI_COMM_WORLD, &this->request[neib++]);
    }
    if (this->my != 1 || this->mz != 1) {
        // To ymin and zmin
        MPI_Isend(this->fsend_ymin_zmin, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz - 1), 6, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_ymax_zmax, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz + 1), 6, MPI_COMM_WORLD, &this->request[neib++]);
        // To ymin and zmax
        MPI_Isend(this->fsend_ymin_zmax, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz + 1), 7, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_ymax_zmin, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz - 1), 7, MPI_COMM_WORLD, &this->request[neib++]);
        // To ymax and zmin
        MPI_Isend(this->fsend_ymax_zmin, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz - 1), 8, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_ymin_zmax, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz + 1), 8, MPI_COMM_WORLD, &this->request[neib++]);
        // To ymax and zmax
        MPI_Isend(this->fsend_ymax_zmax, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy + 1, this->PEz + 1), 9, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_ymin_zmin, this->nx*2, MPI_DOUBLE, this->IndexPE(this->PEx, this->PEy - 1, this->PEz - 1), 9, MPI_COMM_WORLD, &this->request[neib++]);
    }
    if (this->mz != 1 || this->mx != 1) {
        // To zmin and xmin
        MPI_Isend(this->fsend_zmin_xmin, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz - 1), 10, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_zmax_xmax, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz + 1), 10, MPI_COMM_WORLD, &this->request[neib++]);
        // To zmin and xmax
        MPI_Isend(this->fsend_zmin_xmax, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz - 1), 11, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_zmax_xmin, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz + 1), 11, MPI_COMM_WORLD, &this->request[neib++]);
        // To zmax and xmin
        MPI_Isend(this->fsend_zmax_xmin, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz + 1), 12, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_zmin_xmax, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz - 1), 12, MPI_COMM_WORLD, &this->request[neib++]);
        // To zmax and xmax
        MPI_Isend(this->fsend_zmax_xmax, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy, this->PEz + 1), 13, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_zmin_xmin, this->ny*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy, this->PEz - 1), 13, MPI_COMM_WORLD, &this->request[neib++]);
    }
    if (this->mx != 1 || this->my != 1) {
        // To xmin and ymin
        MPI_Isend(this->fsend_xmin_ymin, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz), 14, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_xmax_ymax, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz), 14, MPI_COMM_WORLD, &this->request[neib++]);
        // To xmin and ymax
        MPI_Isend(this->fsend_xmin_ymax, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz), 15, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_xmax_ymin, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz), 15, MPI_COMM_WORLD, &this->request[neib++]);
        // To xmax and ymin
        MPI_Isend(this->fsend_xmax_ymin, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz), 16, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_xmin_ymax, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz), 16, MPI_COMM_WORLD, &this->request[neib++]);
        // To xmax and ymax
        MPI_Isend(this->fsend_xmax_ymax, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz), 17, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(this->frecv_xmin_ymin, this->nz*2, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz), 17, MPI_COMM_WORLD, &this->request[neib++]);
    }
    if (this->mx != 1 || this->my != 1 || this->mz != 1) {
        MPI_Isend(&this->fsend_corner[0], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz - 1), 18, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(&this->frecv_corner[7], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz - 1), 18, MPI_COMM_WORLD, &this->request[neib++]);  //  Corner at xmin, ymin and zmin
        MPI_Isend(&this->fsend_corner[1], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz - 1), 19, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(&this->frecv_corner[6], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz - 1), 19, MPI_COMM_WORLD, &this->request[neib++]);  //  Corner at xmax, ymin and zmin
        MPI_Isend(&this->fsend_corner[2], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz - 1), 20, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(&this->frecv_corner[5], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz - 1), 20, MPI_COMM_WORLD, &this->request[neib++]);  //  Corner at xmin, ymax and zmin
        MPI_Isend(&this->fsend_corner[3], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz - 1), 21, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(&this->frecv_corner[4], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz - 1), 21, MPI_COMM_WORLD, &this->request[neib++]);  //  Corner at xmax, ymax and zmin
        MPI_Isend(&this->fsend_corner[4], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz + 1), 22, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(&this->frecv_corner[3], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy - 1, this->PEz + 1), 22, MPI_COMM_WORLD, &this->request[neib++]);  //  Corner at xmin, ymin and zmax
        MPI_Isend(&this->fsend_corner[5], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz + 1), 23, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(&this->frecv_corner[2], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy - 1, this->PEz + 1), 23, MPI_COMM_WORLD, &this->request[neib++]);  //  Corner at xmax, ymin and zmax
        MPI_Isend(&this->fsend_corner[6], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz + 1), 24, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(&this->frecv_corner[1], 1, MPI_DOUBLE, this->IndexPE(this->PEx - 1, this->PEy + 1, this->PEz + 1), 24, MPI_COMM_WORLD, &this->request[neib++]);  //  Corner at xmin, ymax and zmax
        MPI_Isend(&this->fsend_corner[7], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz + 1), 25, MPI_COMM_WORLD, &this->request[neib++]);
        MPI_Irecv(&this->frecv_corner[0], 1, MPI_DOUBLE, this->IndexPE(this->PEx + 1, this->PEy + 1, this->PEz + 1), 25, MPI_COMM_WORLD, &this->request[neib++]);  //  Corner at xmax, ymax and zmax
    }
    // Block until every posted send and receive has completed before
    // unpacking the receive buffers.
    if (neib > 0) {
        MPI_Waitall(neib, this->request, this->status);
    }
    // Copy to f from frecv along edge or at corner
    // Unpack: overwrite the ghost components on each boundary face/edge/
    // corner with the neighbor's data. The direction sets here are the
    // complements of those packed above (incoming vs. outgoing).
    if (this->mx != 1) {
        for (int j = 0; j < this->ny; ++j) {
            for (int k = 0; k < this->nz; ++k) {
                // Face on xmin
                idx = this->Index(0, j, k);
                idxface = this->IndexBCx(j, k);
                this->f[D3Q15<T>::IndexF(idx, 4)] = this->frecv_xmin[idxface*5 + 0];
                this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_xmin[idxface*5 + 1];
                this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_xmin[idxface*5 + 2];
                this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_xmin[idxface*5 + 3];
                this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_xmin[idxface*5 + 4];
                // Face on xmax
                idx = this->Index(this->nx - 1, j, k);
                idxface = this->IndexBCx(j, k);
                this->f[D3Q15<T>::IndexF(idx, 1)] = this->frecv_xmax[idxface*5 + 0];
                this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_xmax[idxface*5 + 1];
                this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_xmax[idxface*5 + 2];
                this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_xmax[idxface*5 + 3];
                this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_xmax[idxface*5 + 4];
            }
        }
    }
    if (this->my != 1) {
        for (int k = 0; k < this->nz; ++k) {
            for (int i = 0; i < this->nx; ++i) {
                // Face on ymin
                idx = this->Index(i, 0, k);
                idxface = this->IndexBCy(k, i);
                this->f[D3Q15<T>::IndexF(idx, 5)] = this->frecv_ymin[idxface*5 + 0];
                this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_ymin[idxface*5 + 1];
                this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_ymin[idxface*5 + 2];
                this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_ymin[idxface*5 + 3];
                this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_ymin[idxface*5 + 4];
                // Face on ymax
                idx = this->Index(i, this->ny - 1, k);
                idxface = this->IndexBCy(k, i);
                this->f[D3Q15<T>::IndexF(idx, 2)] = this->frecv_ymax[idxface*5 + 0];
                this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_ymax[idxface*5 + 1];
                this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_ymax[idxface*5 + 2];
                this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_ymax[idxface*5 + 3];
                this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_ymax[idxface*5 + 4];
            }
        }
    }
    if (this->mz != 1) {
        for (int i = 0; i < this->nx; ++i) {
            for (int j = 0; j < this->ny; ++j) {
                // Face on zmin
                idx = this->Index(i, j, 0);
                idxface = this->IndexBCz(i, j);
                this->f[D3Q15<T>::IndexF(idx, 6)] = this->frecv_zmin[idxface*5 + 0];
                this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_zmin[idxface*5 + 1];
                this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_zmin[idxface*5 + 2];
                this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_zmin[idxface*5 + 3];
                this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_zmin[idxface*5 + 4];
                // Face on zmax
                idx = this->Index(i, j, this->nz - 1);
                idxface = this->IndexBCz(i, j);
                this->f[D3Q15<T>::IndexF(idx, 3)] = this->frecv_zmax[idxface*5 + 0];
                this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_zmax[idxface*5 + 1];
                this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_zmax[idxface*5 + 2];
                this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_zmax[idxface*5 + 3];
                this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_zmax[idxface*5 + 4];
            }
        }
    }
    if (this->my != 1 || this->mz != 1) {
        for (int i = 0; i < this->nx; ++i) {
            // Edge on ymin and zmin
            idx = this->Index(i, 0, 0);
            this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_ymin_zmin[i*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_ymin_zmin[i*2 + 1];
            // Edge on ymin and zmax
            idx = this->Index(i, 0, this->nz - 1);
            this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_ymin_zmax[i*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_ymin_zmax[i*2 + 1];
            // Edge on ymax and zmin
            idx = this->Index(i, this->ny - 1, 0);
            this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_ymax_zmin[i*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_ymax_zmin[i*2 + 1];
            // Edge on ymax and zmax
            idx = this->Index(i, this->ny - 1, this->nz - 1);
            this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_ymax_zmax[i*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_ymax_zmax[i*2 + 1];
        }
    }
    if (this->mz != 1 || this->mx != 1) {
        for (int j = 0; j < this->ny; ++j) {
            // Edge on zmin and xmin
            idx = this->Index(0, j, 0);
            this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_zmin_xmin[j*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_zmin_xmin[j*2 + 1];
            // Edge on zmin and xmax
            idx = this->Index(this->nx - 1, j, 0);
            this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_zmin_xmax[j*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_zmin_xmax[j*2 + 1];
            // Edge on zmax and xmin
            idx = this->Index(0, j, this->nz - 1);
            this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_zmax_xmin[j*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_zmax_xmin[j*2 + 1];
            // Edge on zmax and xmax
            idx = this->Index(this->nx - 1, j, this->nz - 1);
            this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_zmax_xmax[j*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_zmax_xmax[j*2 + 1];
        }
    }
    if (this->mx != 1 || this->my != 1) {
        for (int k = 0; k < this->nz; ++k) {
            // Edge on xmin and ymin
            idx = this->Index(0, 0, k);
            this->f[D3Q15<T>::IndexF(idx, 11)] = this->frecv_xmin_ymin[k*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 14)] = this->frecv_xmin_ymin[k*2 + 1];
            // Edge on xmin and ymax
            idx = this->Index(0, this->ny - 1, k);
            this->f[D3Q15<T>::IndexF(idx, 8)] = this->frecv_xmin_ymax[k*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 13)] = this->frecv_xmin_ymax[k*2 + 1];
            // Edge on xmax and ymin
            idx = this->Index(this->nx - 1, 0, k);
            this->f[D3Q15<T>::IndexF(idx, 9)] = this->frecv_xmax_ymin[k*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 12)] = this->frecv_xmax_ymin[k*2 + 1];
            // Edge on xmax and ymax
            idx = this->Index(this->nx - 1, this->ny - 1, k);
            this->f[D3Q15<T>::IndexF(idx, 7)] = this->frecv_xmax_ymax[k*2 + 0];
            this->f[D3Q15<T>::IndexF(idx, 10)] = this->frecv_xmax_ymax[k*2 + 1];
        }
    }
    if (this->mx != 1 || this->my != 1 || this->mz != 1) {
        this->f[D3Q15<T>::IndexF(this->Index(0, 0, 0), 11)] = this->frecv_corner[0];                                    //  Corner at xmin, ymin and zmin
        this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, 0, 0), 12)] = this->frecv_corner[1];                         //  Corner at xmax, ymin and zmin
        this->f[D3Q15<T>::IndexF(this->Index(0, this->ny - 1, 0), 13)] = this->frecv_corner[2];                         //  Corner at xmin, ymax and zmin
        this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, this->ny - 1, 0), 10)] = this->frecv_corner[3];              //  Corner at xmax, ymax and zmin
        this->f[D3Q15<T>::IndexF(this->Index(0, 0, this->nz - 1), 14)] = this->frecv_corner[4];                         //  Corner at xmin, ymin and zmax
        this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, 0, this->nz - 1), 9)] = this->frecv_corner[5];               //  Corner at xmax, ymin and zmax
        this->f[D3Q15<T>::IndexF(this->Index(0, this->ny - 1, this->nz - 1), 8)] = this->frecv_corner[6];               //  Corner at xmin, ymax and zmax
        this->f[D3Q15<T>::IndexF(this->Index(this->nx - 1, this->ny - 1, this->nz - 1), 7)] = this->frecv_corner[7];    //  Corner at xmax, ymax and zmax
    }
#endif
}
template<class T>
template<class Ff>
void D3Q15<T>::BoundaryCondition(Ff _bctype) {
// On xmin
if (this->PEx == 0) {
for (int j = 0; j < this->ny; ++j) {
for (int k = 0; k < this->nz; ++k) {
if (_bctype(0 + this->offsetx, j + this->offsety, k + this->offsetz) == BARRIER) {
int idx = this->Index(0, j, k);
this->f[D3Q15<T>::IndexF(idx, 1)] = this->f[D3Q15<T>::IndexF(idx, 4)];
this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 11)];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 13)];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 14)];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 8)];
} else if (_bctype(0 + this->offsetx, j + this->offsety, k + this->offsetz) == MIRROR) {
int idx = this->Index(0, j, k);
this->f[D3Q15<T>::IndexF(idx, 1)] = this->f[D3Q15<T>::IndexF(idx, 4)];
this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 8)];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 14)];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 13)];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 11)];
}
}
}
}
// On xmax
if (this->PEx == this->mx - 1) {
for (int j = 0; j < this->ny; ++j) {
for (int k = 0; k < this->nz; ++k) {
if (_bctype((this->nx - 1) + this->offsetx, j + this->offsety, k + this->offsetz) == BARRIER) {
int idx = this->Index(this->nx - 1, j, k);
this->f[D3Q15<T>::IndexF(idx, 4)] = this->f[D3Q15<T>::IndexF(idx, 1)];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 12)];
this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 9)];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 10)];
} else if (_bctype((this->nx - 1) + this->offsetx, j + this->offsety, k + this->offsetz) == MIRROR) {
int idx = this->Index(this->nx - 1, j, k);
this->f[D3Q15<T>::IndexF(idx, 4)] = this->f[D3Q15<T>::IndexF(idx, 1)];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 12)];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 10)];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 9)];
}
}
}
}
// On ymin
if (this->PEy == 0) {
for (int k = 0; k < this->nz; ++k) {
for (int i = 0; i < this->nx; ++i) {
if (_bctype(i + this->offsetx, 0 + this->offsety, k + this->offsetz) == BARRIER) {
int idx = this->Index(i, 0, k);
this->f[D3Q15<T>::IndexF(idx, 2)] = this->f[D3Q15<T>::IndexF(idx, 5)];
this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 11)];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 12)];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 14)];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 9)];
} else if (_bctype(i + this->offsetx, 0 + this->offsety, k + this->offsetz) == MIRROR) {
int idx = this->Index(i, 0, k);
this->f[D3Q15<T>::IndexF(idx, 2)] = this->f[D3Q15<T>::IndexF(idx, 5)];
this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 9)];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 14)];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 12)];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 11)];
}
}
}
}
// On ymax
if (this->PEy == this->my - 1) {
for (int k = 0; k < this->nz; ++k) {
for (int i = 0; i < this->nx; ++i) {
if (_bctype(i + this->offsetx, (this->ny - 1) + this->offsety, k + this->offsetz) == BARRIER) {
int idx = this->Index(i, this->ny - 1, k);
this->f[D3Q15<T>::IndexF(idx, 5)] = this->f[D3Q15<T>::IndexF(idx, 2)];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 13)];
this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 8)];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 10)];
} else if (_bctype(i + this->offsetx, (this->ny - 1) + this->offsety, k + this->offsetz) == MIRROR) {
int idx = this->Index(i, this->ny - 1, k);
this->f[D3Q15<T>::IndexF(idx, 5)] = this->f[D3Q15<T>::IndexF(idx, 2)];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 13)];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 10)];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 8)];
}
}
}
}
// On zmin
if (this->PEz == 0) {
for (int i = 0; i < this->nx; ++i) {
for (int j = 0; j < this->ny; ++j) {
if (_bctype(i + this->offsetx, j + this->offsety, 0 + this->offsetz) == BARRIER) {
int idx = this->Index(i, j, 0);
this->f[D3Q15<T>::IndexF(idx, 3)] = this->f[D3Q15<T>::IndexF(idx, 6)];
this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 11)];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 12)];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 13)];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 10)];
} else if (_bctype(i + this->offsetx, j + this->offsety, 0 + this->offsetz) == MIRROR) {
int idx = this->Index(i, j, 0);
this->f[D3Q15<T>::IndexF(idx, 3)] = this->f[D3Q15<T>::IndexF(idx, 6)];
this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 10)];
this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 13)];
this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 12)];
this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 11)];
}
}
}
}
// On zmax
if (this->PEz == this->mz - 1) {
for (int i = 0; i < this->nx; ++i) {
for (int j = 0; j < this->ny; ++j) {
if (_bctype(i + this->offsetx, j + this->offsety, (this->nz - 1) + this->offsetz) == BARRIER) {
int idx = this->Index(i, j, this->nz - 1);
this->f[D3Q15<T>::IndexF(idx, 6)] = this->f[D3Q15<T>::IndexF(idx, 3)];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 14)];
this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 8)];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 9)];
} else if (_bctype(i + this->offsetx, j + this->offsety, (this->nz - 1) + this->offsetz) == MIRROR) {
int idx = this->Index(i, j, this->nz - 1);
this->f[D3Q15<T>::IndexF(idx, 6)] = this->f[D3Q15<T>::IndexF(idx, 3)];
this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 7)];
this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 14)];
this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 9)];
this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 8)];
}
}
}
}
}
// Boundary treatment for the adjoint/inverse analysis on the six outer faces of
// this rank's subdomain. For each face cell whose *global* coordinate maps to
// BARRIER, the populations are copied from their opposite-direction partners
// (bounce-back style); for MIRROR, from their specularly-reflected partners.
// Direction indices 1..14 follow the D3Q15 velocity set; the rest population f0
// is untouched.
// NOTE(review): the copied direction pairs are the reverse of the forward
// BoundaryCondition above — consistent with reversed streaming in the adjoint
// solve; confirm against the class's cx/cy/cz tables.
template<class T>
template<class Ff>
void D3Q15<T>::iBoundaryCondition(Ff _bctype) {
    // On xmin (only the ranks owning the global x-min face)
    if (this->PEx == 0) {
        for (int j = 0; j < this->ny; ++j) {
            for (int k = 0; k < this->nz; ++k) {
                if (_bctype(0 + this->offsetx, j + this->offsety, k + this->offsetz) == BARRIER) {
                    int idx = this->Index(0, j, k);
                    this->f[D3Q15<T>::IndexF(idx, 4)] = this->f[D3Q15<T>::IndexF(idx, 1)];
                    this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 12)];
                    this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 7)];
                    this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 9)];
                    this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 10)];
                } else if (_bctype(0 + this->offsetx, j + this->offsety, k + this->offsetz) == MIRROR) {
                    int idx = this->Index(0, j, k);
                    this->f[D3Q15<T>::IndexF(idx, 4)] = this->f[D3Q15<T>::IndexF(idx, 1)];
                    this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 7)];
                    this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 12)];
                    this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 10)];
                    this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 9)];
                }
            }
        }
    }
    // On xmax
    if (this->PEx == this->mx - 1) {
        for (int j = 0; j < this->ny; ++j) {
            for (int k = 0; k < this->nz; ++k) {
                if (_bctype((this->nx - 1) + this->offsetx, j + this->offsety, k + this->offsetz) == BARRIER) {
                    int idx = this->Index(this->nx - 1, j, k);
                    this->f[D3Q15<T>::IndexF(idx, 1)] = this->f[D3Q15<T>::IndexF(idx, 4)];
                    this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 11)];
                    this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 13)];
                    this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 14)];
                    this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 8)];
                } else if (_bctype((this->nx - 1) + this->offsetx, j + this->offsety, k + this->offsetz) == MIRROR) {
                    int idx = this->Index(this->nx - 1, j, k);
                    this->f[D3Q15<T>::IndexF(idx, 1)] = this->f[D3Q15<T>::IndexF(idx, 4)];
                    this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 8)];
                    this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 14)];
                    this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 13)];
                    this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 11)];
                }
            }
        }
    }
    // On ymin
    if (this->PEy == 0) {
        for (int k = 0; k < this->nz; ++k) {
            for (int i = 0; i < this->nx; ++i) {
                if (_bctype(i + this->offsetx, 0 + this->offsety, k + this->offsetz) == BARRIER) {
                    int idx = this->Index(i, 0, k);
                    this->f[D3Q15<T>::IndexF(idx, 5)] = this->f[D3Q15<T>::IndexF(idx, 2)];
                    this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 13)];
                    this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 7)];
                    this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 8)];
                    this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 10)];
                } else if (_bctype(i + this->offsetx, 0 + this->offsety, k + this->offsetz) == MIRROR) {
                    int idx = this->Index(i, 0, k);
                    this->f[D3Q15<T>::IndexF(idx, 5)] = this->f[D3Q15<T>::IndexF(idx, 2)];
                    this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 7)];
                    this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 13)];
                    this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 10)];
                    this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 8)];
                }
            }
        }
    }
    // On ymax
    if (this->PEy == this->my - 1) {
        for (int k = 0; k < this->nz; ++k) {
            for (int i = 0; i < this->nx; ++i) {
                if (_bctype(i + this->offsetx, (this->ny - 1) + this->offsety, k + this->offsetz) == BARRIER) {
                    int idx = this->Index(i, this->ny - 1, k);
                    this->f[D3Q15<T>::IndexF(idx, 2)] = this->f[D3Q15<T>::IndexF(idx, 5)];
                    this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 11)];
                    this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 12)];
                    this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 14)];
                    this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 9)];
                } else if (_bctype(i + this->offsetx, (this->ny - 1) + this->offsety, k + this->offsetz) == MIRROR) {
                    int idx = this->Index(i, this->ny - 1, k);
                    this->f[D3Q15<T>::IndexF(idx, 2)] = this->f[D3Q15<T>::IndexF(idx, 5)];
                    this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 9)];
                    this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 14)];
                    this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 12)];
                    this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 11)];
                }
            }
        }
    }
    // On zmin
    if (this->PEz == 0) {
        for (int i = 0; i < this->nx; ++i) {
            for (int j = 0; j < this->ny; ++j) {
                if (_bctype(i + this->offsetx, j + this->offsety, 0 + this->offsetz) == BARRIER) {
                    int idx = this->Index(i, j, 0);
                    this->f[D3Q15<T>::IndexF(idx, 6)] = this->f[D3Q15<T>::IndexF(idx, 3)];
                    this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 14)];
                    this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 7)];
                    this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 8)];
                    this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 9)];
                } else if (_bctype(i + this->offsetx, j + this->offsety, 0 + this->offsetz) == MIRROR) {
                    int idx = this->Index(i, j, 0);
                    this->f[D3Q15<T>::IndexF(idx, 6)] = this->f[D3Q15<T>::IndexF(idx, 3)];
                    this->f[D3Q15<T>::IndexF(idx, 10)] = this->f[D3Q15<T>::IndexF(idx, 7)];
                    this->f[D3Q15<T>::IndexF(idx, 11)] = this->f[D3Q15<T>::IndexF(idx, 14)];
                    this->f[D3Q15<T>::IndexF(idx, 12)] = this->f[D3Q15<T>::IndexF(idx, 9)];
                    this->f[D3Q15<T>::IndexF(idx, 13)] = this->f[D3Q15<T>::IndexF(idx, 8)];
                }
            }
        }
    }
    // On zmax
    if (this->PEz == this->mz - 1) {
        for (int i = 0; i < this->nx; ++i) {
            for (int j = 0; j < this->ny; ++j) {
                if (_bctype(i + this->offsetx, j + this->offsety, (this->nz - 1) + this->offsetz) == BARRIER) {
                    int idx = this->Index(i, j, this->nz - 1);
                    this->f[D3Q15<T>::IndexF(idx, 3)] = this->f[D3Q15<T>::IndexF(idx, 6)];
                    this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 11)];
                    this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 12)];
                    this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 13)];
                    this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 10)];
                } else if (_bctype(i + this->offsetx, j + this->offsety, (this->nz - 1) + this->offsetz) == MIRROR) {
                    int idx = this->Index(i, j, this->nz - 1);
                    this->f[D3Q15<T>::IndexF(idx, 3)] = this->f[D3Q15<T>::IndexF(idx, 6)];
                    this->f[D3Q15<T>::IndexF(idx, 7)] = this->f[D3Q15<T>::IndexF(idx, 10)];
                    this->f[D3Q15<T>::IndexF(idx, 8)] = this->f[D3Q15<T>::IndexF(idx, 13)];
                    this->f[D3Q15<T>::IndexF(idx, 9)] = this->f[D3Q15<T>::IndexF(idx, 12)];
                    this->f[D3Q15<T>::IndexF(idx, 14)] = this->f[D3Q15<T>::IndexF(idx, 11)];
                }
            }
        }
    }
}
// Smooth the distribution functions on the 12 edges and 8 corners of this
// rank's subdomain, where the face-wise boundary conditions leave some
// populations undetermined: each edge cell is replaced by the average of its
// two face-adjacent neighbours, each corner cell by the average of its three
// axis-adjacent neighbours. Only ranks owning the corresponding global
// extremes (PEx/PEy/PEz at 0 or mx/my/mz-1) act on each edge/corner.
// Numerics are identical to the original unrolled version: edges use
// 0.5*(a+b), corners use (a+b+c)/3.0 with the same operand order.
template<class T>
void D3Q15<T>::SmoothCorner() {
    // Average all populations (f0 and f1..nc-1) of cells idxa and idxb into idx.
    auto smooth2 = [&](int idx, int idxa, int idxb) {
        this->f0[idx] = 0.5*(this->f0[idxa] + this->f0[idxb]);
        for (int c = 1; c < D3Q15<T>::nc; ++c) {
            this->f[D3Q15<T>::IndexF(idx, c)] = 0.5*(this->f[D3Q15<T>::IndexF(idxa, c)] + this->f[D3Q15<T>::IndexF(idxb, c)]);
        }
    };
    // Average all populations of cells idxa, idxb and idxc into idx.
    auto smooth3 = [&](int idx, int idxa, int idxb, int idxc) {
        this->f0[idx] = (this->f0[idxa] + this->f0[idxb] + this->f0[idxc])/3.0;
        for (int c = 1; c < D3Q15<T>::nc; ++c) {
            this->f[D3Q15<T>::IndexF(idx, c)] = (this->f[D3Q15<T>::IndexF(idxa, c)] + this->f[D3Q15<T>::IndexF(idxb, c)] + this->f[D3Q15<T>::IndexF(idxc, c)])/3.0;
        }
    };

    const int nxm1 = this->nx - 1, nym1 = this->ny - 1, nzm1 = this->nz - 1;
    const bool xmin = (this->PEx == 0), xmax = (this->PEx == this->mx - 1);
    const bool ymin = (this->PEy == 0), ymax = (this->PEy == this->my - 1);
    const bool zmin = (this->PEz == 0), zmax = (this->PEz == this->mz - 1);

    // ---- Edges parallel to the x axis ----
    if (ymin && zmin) {
        for (int i = 0; i < this->nx; ++i) {
            smooth2(this->Index(i, 0, 0), this->Index(i, 1, 0), this->Index(i, 0, 1));
        }
    }
    if (ymax && zmin) {
        for (int i = 0; i < this->nx; ++i) {
            smooth2(this->Index(i, nym1, 0), this->Index(i, this->ny - 2, 0), this->Index(i, nym1, 1));
        }
    }
    if (ymax && zmax) {
        for (int i = 0; i < this->nx; ++i) {
            smooth2(this->Index(i, nym1, nzm1), this->Index(i, this->ny - 2, nzm1), this->Index(i, nym1, this->nz - 2));
        }
    }
    if (ymin && zmax) {
        for (int i = 0; i < this->nx; ++i) {
            smooth2(this->Index(i, 0, nzm1), this->Index(i, 1, nzm1), this->Index(i, 0, this->nz - 2));
        }
    }
    // ---- Edges parallel to the y axis ----
    if (zmin && xmin) {
        for (int j = 0; j < this->ny; ++j) {
            smooth2(this->Index(0, j, 0), this->Index(1, j, 0), this->Index(0, j, 1));
        }
    }
    if (zmax && xmin) {
        for (int j = 0; j < this->ny; ++j) {
            smooth2(this->Index(0, j, nzm1), this->Index(0, j, this->nz - 2), this->Index(1, j, nzm1));
        }
    }
    if (zmax && xmax) {
        for (int j = 0; j < this->ny; ++j) {
            smooth2(this->Index(nxm1, j, nzm1), this->Index(nxm1, j, this->nz - 2), this->Index(this->nx - 2, j, nzm1));
        }
    }
    if (zmin && xmax) {
        for (int j = 0; j < this->ny; ++j) {
            smooth2(this->Index(nxm1, j, 0), this->Index(nxm1, j, 1), this->Index(this->nx - 2, j, 0));
        }
    }
    // ---- Edges parallel to the z axis ----
    if (xmin && ymin) {
        for (int k = 0; k < this->nz; ++k) {
            smooth2(this->Index(0, 0, k), this->Index(1, 0, k), this->Index(0, 1, k));
        }
    }
    if (xmax && ymin) {
        for (int k = 0; k < this->nz; ++k) {
            smooth2(this->Index(nxm1, 0, k), this->Index(this->nx - 2, 0, k), this->Index(nxm1, 1, k));
        }
    }
    if (xmax && ymax) {
        for (int k = 0; k < this->nz; ++k) {
            smooth2(this->Index(nxm1, nym1, k), this->Index(this->nx - 2, nym1, k), this->Index(nxm1, this->ny - 2, k));
        }
    }
    if (xmin && ymax) {
        for (int k = 0; k < this->nz; ++k) {
            smooth2(this->Index(0, nym1, k), this->Index(1, nym1, k), this->Index(0, this->ny - 2, k));
        }
    }
    // ---- Corners: average the three axis-adjacent neighbours (x, y, z order) ----
    if (xmin && ymin && zmin) {
        smooth3(this->Index(0, 0, 0), this->Index(1, 0, 0), this->Index(0, 1, 0), this->Index(0, 0, 1));
    }
    if (xmax && ymin && zmin) {
        smooth3(this->Index(nxm1, 0, 0), this->Index(this->nx - 2, 0, 0), this->Index(nxm1, 1, 0), this->Index(nxm1, 0, 1));
    }
    if (xmax && ymax && zmin) {
        smooth3(this->Index(nxm1, nym1, 0), this->Index(this->nx - 2, nym1, 0), this->Index(nxm1, this->ny - 2, 0), this->Index(nxm1, nym1, 1));
    }
    if (xmin && ymax && zmin) {
        smooth3(this->Index(0, nym1, 0), this->Index(1, nym1, 0), this->Index(0, this->ny - 2, 0), this->Index(0, nym1, 1));
    }
    if (xmin && ymin && zmax) {
        smooth3(this->Index(0, 0, nzm1), this->Index(1, 0, nzm1), this->Index(0, 1, nzm1), this->Index(0, 0, this->nz - 2));
    }
    if (xmax && ymin && zmax) {
        smooth3(this->Index(nxm1, 0, nzm1), this->Index(this->nx - 2, 0, nzm1), this->Index(nxm1, 1, nzm1), this->Index(nxm1, 0, this->nz - 2));
    }
    if (xmax && ymax && zmax) {
        smooth3(this->Index(nxm1, nym1, nzm1), this->Index(this->nx - 2, nym1, nzm1), this->Index(nxm1, this->ny - 2, nzm1), this->Index(nxm1, nym1, this->nz - 2));
    }
    if (xmin && ymax && zmax) {
        smooth3(this->Index(0, nym1, nzm1), this->Index(1, nym1, nzm1), this->Index(0, this->ny - 2, nzm1), this->Index(0, nym1, this->nz - 2));
    }
}
#ifdef _USE_AVX_DEFINES
// Per-direction lattice constants (velocity components cx/cy/cz and weights ei)
// broadcast to 4-wide AVX vectors. Zero-initialized here; filled in by
// LoadCxCyCzEi() before use.
template<>__m256d D3Q15<double>::__cx[D3Q15<double>::nc] = { 0 };
template<>__m256d D3Q15<double>::__cy[D3Q15<double>::nc] = { 0 };
template<>__m256d D3Q15<double>::__cz[D3Q15<double>::nc] = { 0 };
template<>__m256d D3Q15<double>::__ei[D3Q15<double>::nc] = { 0 };
// Initialize the AVX mirrors of the lattice constants: each scalar velocity
// component / weight is broadcast into all four lanes of its __m256d slot.
template<>
void D3Q15<double>::LoadCxCyCzEi() {
    for (int d = 0; d < D3Q15<double>::nc; ++d) {
        D3Q15<double>::__cx[d] = _mm256_set1_pd(static_cast<double>(D3Q15<double>::cx[d]));
        D3Q15<double>::__cy[d] = _mm256_set1_pd(static_cast<double>(D3Q15<double>::cy[d]));
        D3Q15<double>::__cz[d] = _mm256_set1_pd(static_cast<double>(D3Q15<double>::cz[d]));
        D3Q15<double>::__ei[d] = _mm256_set1_pd(static_cast<double>(D3Q15<double>::ei[d]));
    }
}
// Gather the 15 distribution functions of the 4 consecutive sites
// _idx.._idx+3 from their interleaved layout in this->f (f1..f14 stored
// contiguously per site) into structure-of-arrays form: lane l of __f[c]
// holds f_c of site _idx+l. Implemented as 4x4 double transposes using
// 128-bit lane permutes followed by 64-bit unpacks; the trailing comments
// show the lane contents as "fc(site)", high lane first.
// _mm256_load_pd requires 32-byte-aligned storage for f and f0.
template<>
template<>
void D3Q15<double>::LoadF<__m256d>(int _idx, __m256d *__f) {
    const int offsetf = D3Q15<double>::IndexF(_idx, 1);
    __m256d load0 = _mm256_load_pd(&this->f[offsetf + 0]); // f 4(0) f 3(0) f 2(0) f 1(0)
    __m256d load1 = _mm256_load_pd(&this->f[offsetf + 4]); // f 8(0) f 7(0) f 6(0) f 5(0)
    __m256d load2 = _mm256_load_pd(&this->f[offsetf + 8]); // f12(0) f11(0) f10(0) f 9(0)
    __m256d load3 = _mm256_load_pd(&this->f[offsetf + 12]); // f 2(1) f 1(1) f14(0) f13(0)
    __m256d load4 = _mm256_load_pd(&this->f[offsetf + 16]); // f 6(1) f 5(1) f 4(1) f 3(1)
    __m256d load5 = _mm256_load_pd(&this->f[offsetf + 20]); // f10(1) f 9(1) f 8(1) f 7(1)
    __m256d load6 = _mm256_load_pd(&this->f[offsetf + 24]); // f14(1) f13(1) f12(1) f11(1)
    __m256d load7 = _mm256_load_pd(&this->f[offsetf + 28]); // f 4(2) f 3(2) f 2(2) f 1(2)
    __m256d load8 = _mm256_load_pd(&this->f[offsetf + 32]); // f 8(2) f 7(2) f 6(2) f 5(2)
    __m256d load9 = _mm256_load_pd(&this->f[offsetf + 36]); // f12(2) f11(2) f10(2) f 9(2)
    __m256d load10 = _mm256_load_pd(&this->f[offsetf + 40]); // f 2(3) f 1(3) f14(2) f13(2)
    __m256d load11 = _mm256_load_pd(&this->f[offsetf + 44]); // f 6(3) f 5(3) f 4(3) f 3(3)
    __m256d load12 = _mm256_load_pd(&this->f[offsetf + 48]); // f10(3) f 9(3) f 8(3) f 7(3)
    __m256d load13 = _mm256_load_pd(&this->f[offsetf + 52]); // f14(3) f13(3) f12(3) f11(3)
    // mm0 selects (low(a), low(b)), mm1 selects (high(a), high(b)) 128-bit halves.
    const int mm0 = 2*16 + 0*1, mm1 = 3*16 + 1*1;
    __m256d permute0 = _mm256_permute2f128_pd(load0, load7, mm0); // f 2(2) f 1(2) f 2(0) f 1(0)
    __m256d permute1 = _mm256_permute2f128_pd(load0, load7, mm1); // f 4(2) f 3(2) f 4(0) f 3(0)
    __m256d permute2 = _mm256_permute2f128_pd(load1, load8, mm0); // f 6(2) f 5(2) f 6(0) f 5(0)
    __m256d permute3 = _mm256_permute2f128_pd(load1, load8, mm1); // f 8(2) f 7(2) f 8(0) f 7(0)
    __m256d permute4 = _mm256_permute2f128_pd(load2, load9, mm0); // f10(2) f 9(2) f10(0) f 9(0)
    __m256d permute5 = _mm256_permute2f128_pd(load2, load9, mm1); // f12(2) f11(2) f12(0) f11(0)
    __m256d permute6 = _mm256_permute2f128_pd(load3, load10, mm0); // f14(2) f13(2) f14(0) f13(0)
    __m256d permute7 = _mm256_permute2f128_pd(load3, load10, mm1); // f 2(3) f 1(3) f 2(1) f 1(1)
    __m256d permute8 = _mm256_permute2f128_pd(load4, load11, mm0); // f 4(3) f 3(3) f 4(1) f 3(1)
    __m256d permute9 = _mm256_permute2f128_pd(load4, load11, mm1); // f 6(3) f 5(3) f 6(1) f 5(1)
    __m256d permute10 = _mm256_permute2f128_pd(load5, load12, mm0); // f 8(3) f 7(3) f 8(1) f 7(1)
    __m256d permute11 = _mm256_permute2f128_pd(load5, load12, mm1); // f10(3) f 9(3) f10(1) f 9(1)
    __m256d permute12 = _mm256_permute2f128_pd(load6, load13, mm0); // f12(3) f11(3) f12(1) f11(1)
    __m256d permute13 = _mm256_permute2f128_pd(load6, load13, mm1); // f14(3) f13(3) f14(1) f13(1)
    __f[ 0] = _mm256_load_pd(&this->f0[_idx]); // f 0(3) f 0(2) f 0(1) f 0(0)
    __f[ 1] = _mm256_unpacklo_pd(permute0, permute7); // f 1(3) f 1(2) f 1(1) f 1(0)
    __f[ 2] = _mm256_unpackhi_pd(permute0, permute7); // f 2(3) f 2(2) f 2(1) f 2(0)
    __f[ 3] = _mm256_unpacklo_pd(permute1, permute8); // f 3(3) f 3(2) f 3(1) f 3(0)
    __f[ 4] = _mm256_unpackhi_pd(permute1, permute8); // f 4(3) f 4(2) f 4(1) f 4(0)
    __f[ 5] = _mm256_unpacklo_pd(permute2, permute9); // f 5(3) f 5(2) f 5(1) f 5(0)
    __f[ 6] = _mm256_unpackhi_pd(permute2, permute9); // f 6(3) f 6(2) f 6(1) f 6(0)
    __f[ 7] = _mm256_unpacklo_pd(permute3, permute10); // f 7(3) f 7(2) f 7(1) f 7(0)
    __f[ 8] = _mm256_unpackhi_pd(permute3, permute10); // f 8(3) f 8(2) f 8(1) f 8(0)
    __f[ 9] = _mm256_unpacklo_pd(permute4, permute11); // f 9(3) f 9(2) f 9(1) f 9(0)
    __f[10] = _mm256_unpackhi_pd(permute4, permute11); // f10(3) f10(2) f10(1) f10(0)
    __f[11] = _mm256_unpacklo_pd(permute5, permute12); // f11(3) f11(2) f11(1) f11(0)
    __f[12] = _mm256_unpackhi_pd(permute5, permute12); // f12(3) f12(2) f12(1) f12(0)
    __f[13] = _mm256_unpacklo_pd(permute6, permute13); // f13(3) f13(2) f13(1) f13(0)
    __f[14] = _mm256_unpackhi_pd(permute6, permute13); // f14(3) f14(2) f14(1) f14(0)
}
// Inverse of LoadF<__m256d>: scatter the SoA registers __f[0..14] (lane l =
// site _idx+l) back into the interleaved storage of this->f / this->f0,
// using the mirror sequence — 64-bit unpacks first, then 128-bit lane
// permutes. _mm256_store_pd requires 32-byte-aligned storage.
template<>
template<>
void D3Q15<double>::StoreF<__m256d>(int _idx, const __m256d *__f) {
    __m256d unpack0 = _mm256_unpacklo_pd(__f[1], __f[2]); // f 2(2) f 1(2) f 2(0) f 1(0)
    __m256d unpack1 = _mm256_unpackhi_pd(__f[1], __f[2]); // f 2(3) f 1(3) f 2(1) f 1(1)
    __m256d unpack2 = _mm256_unpacklo_pd(__f[3], __f[4]); // f 4(2) f 3(2) f 4(0) f 3(0)
    __m256d unpack3 = _mm256_unpackhi_pd(__f[3], __f[4]); // f 4(3) f 3(3) f 4(1) f 3(1)
    __m256d unpack4 = _mm256_unpacklo_pd(__f[5], __f[6]); // f 6(2) f 5(2) f 6(0) f 5(0)
    __m256d unpack5 = _mm256_unpackhi_pd(__f[5], __f[6]); // f 6(3) f 5(3) f 6(1) f 5(1)
    __m256d unpack6 = _mm256_unpacklo_pd(__f[7], __f[8]); // f 8(2) f 7(2) f 8(0) f 7(0)
    __m256d unpack7 = _mm256_unpackhi_pd(__f[7], __f[8]); // f 8(3) f 7(3) f 8(1) f 7(1)
    __m256d unpack8 = _mm256_unpacklo_pd(__f[9], __f[10]); // f10(2) f 9(2) f10(0) f 9(0)
    __m256d unpack9 = _mm256_unpackhi_pd(__f[9], __f[10]); // f10(3) f 9(3) f10(1) f 9(1)
    __m256d unpack10 = _mm256_unpacklo_pd(__f[11], __f[12]); // f12(2) f11(2) f12(0) f11(0)
    __m256d unpack11 = _mm256_unpackhi_pd(__f[11], __f[12]); // f12(3) f11(3) f12(1) f11(1)
    __m256d unpack12 = _mm256_unpacklo_pd(__f[13], __f[14]); // f14(2) f13(2) f14(0) f13(0)
    __m256d unpack13 = _mm256_unpackhi_pd(__f[13], __f[14]); // f14(3) f13(3) f14(1) f13(1)
    // mm0 selects the low 128-bit halves, mm1 the high halves.
    const int mm0 = 2*16 + 0*1, mm1 = 3*16 + 1*1, offsetf = D3Q15<double>::IndexF(_idx, 1);
    _mm256_store_pd(&this->f0[_idx], __f[0]); // f 0(3) f 0(2) f 0(1) f 0(0)
    _mm256_store_pd(&this->f[offsetf + 0], _mm256_permute2f128_pd(unpack0, unpack2, mm0)); // f 4(0) f 3(0) f 2(0) f 1(0)
    _mm256_store_pd(&this->f[offsetf + 4], _mm256_permute2f128_pd(unpack4, unpack6, mm0)); // f 8(0) f 7(0) f 6(0) f 5(0)
    _mm256_store_pd(&this->f[offsetf + 8], _mm256_permute2f128_pd(unpack8, unpack10, mm0)); // f12(0) f11(0) f10(0) f 9(0)
    _mm256_store_pd(&this->f[offsetf + 12], _mm256_permute2f128_pd(unpack12, unpack1, mm0)); // f 2(1) f 1(1) f14(0) f13(0)
    _mm256_store_pd(&this->f[offsetf + 16], _mm256_permute2f128_pd(unpack3, unpack5, mm0)); // f 6(1) f 5(1) f 4(1) f 3(1)
    _mm256_store_pd(&this->f[offsetf + 20], _mm256_permute2f128_pd(unpack7, unpack9, mm0)); // f10(1) f 9(1) f 8(1) f 7(1)
    _mm256_store_pd(&this->f[offsetf + 24], _mm256_permute2f128_pd(unpack11, unpack13, mm0)); // f14(1) f13(1) f12(1) f11(1)
    _mm256_store_pd(&this->f[offsetf + 28], _mm256_permute2f128_pd(unpack0, unpack2, mm1)); // f 4(2) f 3(2) f 2(2) f 1(2)
    _mm256_store_pd(&this->f[offsetf + 32], _mm256_permute2f128_pd(unpack4, unpack6, mm1)); // f 8(2) f 7(2) f 6(2) f 5(2)
    _mm256_store_pd(&this->f[offsetf + 36], _mm256_permute2f128_pd(unpack8, unpack10, mm1)); // f12(2) f11(2) f10(2) f 9(2)
    _mm256_store_pd(&this->f[offsetf + 40], _mm256_permute2f128_pd(unpack12, unpack1, mm1)); // f 2(3) f 1(3) f14(2) f13(2)
    _mm256_store_pd(&this->f[offsetf + 44], _mm256_permute2f128_pd(unpack3, unpack5, mm1)); // f 6(3) f 5(3) f 4(3) f 3(3)
    _mm256_store_pd(&this->f[offsetf + 48], _mm256_permute2f128_pd(unpack7, unpack9, mm1)); // f10(3) f 9(3) f 8(3) f 7(3)
    _mm256_store_pd(&this->f[offsetf + 52], _mm256_permute2f128_pd(unpack11, unpack13, mm1)); // f14(3) f13(3) f12(3) f11(3)
}
#endif
} |
HashmapCPU.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#pragma once
#include <tbb/concurrent_unordered_map.h>
#include <unordered_map>
#include "open3d/core/hashmap/CPU/HashmapBufferCPU.hpp"
#include "open3d/core/hashmap/DeviceHashmap.h"
namespace open3d {
namespace core {
// CPU backend of DeviceHashmap built on tbb::concurrent_unordered_map.
// The map stores {pointer-to-key-bytes -> slot address (addr_t)}; the actual
// key/value bytes presumably live in the flat buffer managed by buffer_ctx_ —
// confirm in HashmapBufferCPU.hpp.
template <typename Hash, typename KeyEq>
class CPUHashmap : public DeviceHashmap<Hash, KeyEq> {
public:
    CPUHashmap(int64_t init_buckets,
               int64_t init_capacity,
               int64_t dsize_key,
               int64_t dsize_value,
               const Device& device);

    ~CPUHashmap();

    // Re-bucket the underlying map to hold `buckets` buckets.
    void Rehash(int64_t buckets) override;

    // Batch-insert `count` key/value pairs; grows first if capacity would be
    // exceeded. Per-element success is reported via output_masks, the
    // assigned slot via output_addrs.
    void Insert(const void* input_keys,
                const void* input_values,
                addr_t* output_addrs,
                bool* output_masks,
                int64_t count) override;

    // Like Insert, but reserves slots for keys only (no values copied).
    void Activate(const void* input_keys,
                  addr_t* output_addrs,
                  bool* output_masks,
                  int64_t count) override;

    // Batch lookup: a miss yields mask=false and addr=0.
    void Find(const void* input_keys,
              addr_t* output_addrs,
              bool* output_masks,
              int64_t count) override;

    // Batch erase; output_masks reports which keys were present.
    void Erase(const void* input_keys,
               bool* output_masks,
               int64_t count) override;

    // Presumably writes the addresses of all active entries to output_indices
    // and returns their count — confirm against the implementation.
    int64_t GetActiveIndices(addr_t* output_indices) override;

    // Current number of stored entries.
    int64_t Size() const override;
    std::vector<int64_t> BucketSizes() const override;
    float LoadFactor() const override;

protected:
    // Concurrent map from key-byte pointers to buffer slot addresses.
    std::shared_ptr<tbb::concurrent_unordered_map<void*, addr_t, Hash, KeyEq>>
            impl_;

    // Backing storage context for key/value bytes.
    std::shared_ptr<CPUHashmapBufferContext> buffer_ctx_;

    // Shared worker for Insert/Activate (input_values may be nullptr).
    void InsertImpl(const void* input_keys,
                    const void* input_values,
                    addr_t* output_addrs,
                    bool* output_masks,
                    int64_t count);

    void Allocate(int64_t capacity, int64_t buckets);
};
// Construct the CPU hashmap and allocate its initial key/value storage.
// init_capacity is forwarded to the base class (a dummy for this
// std/TBB-map-style backend; reserved for other hashmap implementations)
// and used here to size the initial buffer allocation.
template <typename Hash, typename KeyEq>
CPUHashmap<Hash, KeyEq>::CPUHashmap(int64_t init_buckets,
                                    int64_t init_capacity,
                                    int64_t dsize_key,
                                    int64_t dsize_value,
                                    const Device& device)
    : DeviceHashmap<Hash, KeyEq>(
              init_buckets,
              init_capacity,  // dummy for std unordered_map, reserved for
                              // other hashmaps
              dsize_key,
              dsize_value,
              device) {
    Allocate(init_capacity, init_buckets);
}
// Members (shared_ptr map and buffer context) release themselves.
template <typename Hash, typename KeyEq>
CPUHashmap<Hash, KeyEq>::~CPUHashmap() = default;
template <typename Hash, typename KeyEq>
int64_t CPUHashmap<Hash, KeyEq>::Size() const {
return impl_->size();
}
// Insert `count` key/value pairs, growing the table first whenever the
// projected size would exceed the current capacity. output_masks[i]
// reports whether key i was newly inserted (false for duplicates).
template <typename Hash, typename KeyEq>
void CPUHashmap<Hash, KeyEq>::Insert(const void* input_keys,
                                     const void* input_values,
                                     addr_t* output_addrs,
                                     bool* output_masks,
                                     int64_t count) {
    const int64_t projected_size = Size() + count;
    if (projected_size > this->capacity_) {
        // Grow while keeping the average entries-per-bucket ratio, but
        // never with fewer than twice the current bucket count.
        float entries_per_bucket =
                float(this->capacity_) / float(this->bucket_count_);
        int64_t needed_buckets =
                int64_t(std::ceil(projected_size / entries_per_bucket));
        Rehash(std::max(this->bucket_count_ * 2, needed_buckets));
    }
    InsertImpl(input_keys, input_values, output_addrs, output_masks, count);
}
// Insert keys without caller-provided values; the reserved value slots are
// zero-filled (InsertImpl is called with nullptr input_values). Growth
// policy mirrors Insert().
template <typename Hash, typename KeyEq>
void CPUHashmap<Hash, KeyEq>::Activate(const void* input_keys,
                                       addr_t* output_addrs,
                                       bool* output_masks,
                                       int64_t count) {
    int64_t new_size = Size() + count;
    if (new_size > this->capacity_) {
        // Grow: keep the average capacity-per-bucket ratio, but at least
        // double the bucket count.
        float avg_capacity_per_bucket =
                float(this->capacity_) / float(this->bucket_count_);
        int64_t expected_buckets = std::max(
                this->bucket_count_ * 2,
                int64_t(std::ceil(new_size / avg_capacity_per_bucket)));
        Rehash(expected_buckets);
    }
    InsertImpl(input_keys, nullptr, output_addrs, output_masks, count);
}
// Parallel lookup of `count` keys laid out contiguously in input_keys.
// For key i: output_masks[i] says whether it was found, output_addrs[i]
// holds its buffer address (0 when absent). Parallelizable because
// concurrent_unordered_map::find is safe under concurrent access.
template <typename Hash, typename KeyEq>
void CPUHashmap<Hash, KeyEq>::Find(const void* input_keys,
                                   addr_t* output_addrs,
                                   bool* output_masks,
                                   int64_t count) {
#pragma omp parallel for
    for (int64_t i = 0; i < count; ++i) {
        // const_cast: the map's key type is void*, but find() only reads.
        uint8_t* key = const_cast<uint8_t*>(
                static_cast<const uint8_t*>(input_keys) + this->dsize_key_ * i);

        auto iter = impl_->find(key);
        bool flag = (iter != impl_->end());
        output_masks[i] = flag;
        output_addrs[i] = flag ? iter->second : 0;
    }
}
// Erase `count` keys; output_masks[i] is true iff key i was present.
// Intentionally serial: tbb's unsafe_erase is not safe to run concurrently
// with other map operations.
template <typename Hash, typename KeyEq>
void CPUHashmap<Hash, KeyEq>::Erase(const void* input_keys,
                                    bool* output_masks,
                                    int64_t count) {
    for (int64_t i = 0; i < count; ++i) {
        uint8_t* key = const_cast<uint8_t*>(
                static_cast<const uint8_t*>(input_keys) + this->dsize_key_ * i);

        auto iter = impl_->find(key);
        bool flag = (iter != impl_->end());
        output_masks[i] = flag;
        if (flag) {
            // Return the slot to the buffer free-list before unlinking
            // the map entry.
            buffer_ctx_->DeviceFree(iter->second);
            impl_->unsafe_erase(iter);
        }
    }
    this->bucket_count_ = impl_->unsafe_bucket_count();
}
// Copy the buffer addresses of all active entries into output_indices
// (caller must provide room for Size() elements); returns the count.
// NOTE(review): the value is widened to int64_t and then stored into an
// addr_t slot — assumes addr_t can represent every address; confirm.
template <typename Hash, typename KeyEq>
int64_t CPUHashmap<Hash, KeyEq>::GetActiveIndices(addr_t* output_indices) {
    int64_t count = impl_->size();
    int64_t i = 0;
    for (auto iter = impl_->begin(); iter != impl_->end(); ++iter, ++i) {
        output_indices[i] = static_cast<int64_t>(iter->second);
    }
    return count;
}
// Rebuild the table with `buckets` buckets:
//   1) snapshot all active key/value pairs out of the current buffer,
//   2) Allocate a fresh buffer sized to preserve the average
//      capacity-per-bucket ratio (this drops the old storage),
//   3) re-insert the snapshot, 4) rehash the underlying map.
template <typename Hash, typename KeyEq>
void CPUHashmap<Hash, KeyEq>::Rehash(int64_t buckets) {
    int64_t iterator_count = Size();

    Tensor active_keys;
    Tensor active_values;

    if (iterator_count > 0) {
        // Gather live entry addresses, then pull their keys/values out of
        // the buffer before Allocate() replaces it.
        Tensor active_addrs({iterator_count}, Dtype::Int32, this->device_);
        GetActiveIndices(static_cast<addr_t*>(active_addrs.GetDataPtr()));

        Tensor active_indices = active_addrs.To(Dtype::Int64);
        active_keys = this->GetKeyBuffer().IndexGet({active_indices});
        active_values = this->GetValueBuffer().IndexGet({active_indices});
    }

    float avg_capacity_per_bucket =
            float(this->capacity_) / float(this->bucket_count_);
    int64_t new_capacity =
            int64_t(std::ceil(buckets * avg_capacity_per_bucket));

    // Replaces buffer_, buffer_ctx_ and impl_.
    Allocate(new_capacity, buckets);

    if (iterator_count > 0) {
        Tensor output_addrs({iterator_count}, Dtype::Int32, this->device_);
        Tensor output_masks({iterator_count}, Dtype::Bool, this->device_);

        InsertImpl(active_keys.GetDataPtr(), active_values.GetDataPtr(),
                   static_cast<addr_t*>(output_addrs.GetDataPtr()),
                   static_cast<bool*>(output_masks.GetDataPtr()),
                   iterator_count);
    }

    impl_->rehash(buckets);
    this->bucket_count_ = impl_->unsafe_bucket_count();
}
// Return the number of entries held by each bucket of the underlying map.
template <typename Hash, typename KeyEq>
std::vector<int64_t> CPUHashmap<Hash, KeyEq>::BucketSizes() const {
    const int64_t n_buckets = impl_->unsafe_bucket_count();
    std::vector<int64_t> sizes;
    sizes.reserve(n_buckets);
    for (int64_t b = 0; b < n_buckets; ++b) {
        sizes.push_back(impl_->unsafe_bucket_size(b));
    }
    return sizes;
}
// Average number of entries per bucket of the underlying map.
template <typename Hash, typename KeyEq>
float CPUHashmap<Hash, KeyEq>::LoadFactor() const {
    return impl_->load_factor();
}
// Shared insertion backend for Insert/Activate/Rehash.
// Phase 1 (parallel): for each key, allocate a buffer slot, copy the key
// (and the value, or zero-fill it when input_values == nullptr), then try
// to insert the slot into the concurrent map.
// Phase 2 (parallel): free the slots of keys that lost the insertion race
// (duplicates), i.e. entries whose mask is false.
template <typename Hash, typename KeyEq>
void CPUHashmap<Hash, KeyEq>::InsertImpl(const void* input_keys,
                                         const void* input_values,
                                         addr_t* output_addrs,
                                         bool* output_masks,
                                         int64_t count) {
#pragma omp parallel for
    for (int64_t i = 0; i < count; ++i) {
        const uint8_t* src_key =
                static_cast<const uint8_t*>(input_keys) + this->dsize_key_ * i;

        // Reserve a slot and resolve it to (key ptr, value ptr).
        addr_t dst_kv_addr = buffer_ctx_->DeviceAllocate();
        auto dst_kv_iter = buffer_ctx_->ExtractIterator(dst_kv_addr);
        uint8_t* dst_key = static_cast<uint8_t*>(dst_kv_iter.first);
        uint8_t* dst_value = static_cast<uint8_t*>(dst_kv_iter.second);

        std::memcpy(dst_key, src_key, this->dsize_key_);
        if (input_values != nullptr) {
            const uint8_t* src_value =
                    static_cast<const uint8_t*>(input_values) +
                    this->dsize_value_ * i;
            std::memcpy(dst_value, src_value, this->dsize_value_);
        } else {
            // Activate() path: no caller values, zero-initialize.
            std::memset(dst_value, 0, this->dsize_value_);
        }

        // Try insertion; res.second is false if the key already existed.
        auto res = impl_->insert({dst_key, dst_kv_addr});

        output_addrs[i] = dst_kv_addr;
        output_masks[i] = res.second;
    }

    // Reclaim slots speculatively allocated for duplicate keys.
#pragma omp parallel for
    for (int64_t i = 0; i < count; ++i) {
        if (!output_masks[i]) {
            buffer_ctx_->DeviceFree(output_addrs[i]);
        }
    }

    this->bucket_count_ = impl_->unsafe_bucket_count();
}
// Replace the storage: a fresh HashmapBuffer of `capacity` slots, a fresh
// buffer context over it, and a fresh concurrent map with `buckets`
// buckets. Previous contents are dropped (Rehash snapshots them first).
template <typename Hash, typename KeyEq>
void CPUHashmap<Hash, KeyEq>::Allocate(int64_t capacity, int64_t buckets) {
    this->capacity_ = capacity;

    this->buffer_ =
            std::make_shared<HashmapBuffer>(this->capacity_, this->dsize_key_,
                                            this->dsize_value_, this->device_);

    buffer_ctx_ = std::make_shared<CPUHashmapBufferContext>(
            this->capacity_, this->dsize_key_, this->dsize_value_,
            this->buffer_->GetKeyBuffer(), this->buffer_->GetValueBuffer(),
            this->buffer_->GetHeap());
    // Marks every slot as free.
    buffer_ctx_->Reset();

    impl_ = std::make_shared<
            tbb::concurrent_unordered_map<void*, addr_t, Hash, KeyEq>>(
            buckets, Hash(this->dsize_key_), KeyEq(this->dsize_key_));
}
} // namespace core
} // namespace open3d
|
GB_binop__copysign_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__copysign_fp64
// A.*B function (eWiseMult): GB_AemultB__copysign_fp64
// A*D function (colscale): (none)
// D*A function (rowscale):   (none)
// C+=B function (dense accum): GB_Cdense_accumB__copysign_fp64
// C+=b function (dense accum): GB_Cdense_accumb__copysign_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__copysign_fp64
// C=scalar+B GB_bind1st__copysign_fp64
// C=scalar+B' GB_bind1st_tran__copysign_fp64
// C=A+scalar GB_bind2nd__copysign_fp64
// C=A'+scalar GB_bind2nd_tran__copysign_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = copysign (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = copysign (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COPYSIGN || GxB_NO_FP64 || GxB_NO_COPYSIGN_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// C = A+B with no accumulator; the loop lives in the included template and
// expands the GB_* macros defined above (GB_BINOP is copysign here).
GrB_Info GB_Cdense_ewise3_noaccum__copysign_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// C += B where C is dense and B is sparse; B has been pre-sliced into
// `ntasks` tasks (kfirst/klast/pstart arrays) for `nthreads` threads.
GrB_Info GB_Cdense_accumB__copysign_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// C += b where C is dense and b is a scalar passed as an untyped pointer,
// reinterpreted as double (this kernel's B type).
GrB_Info GB_Cdense_accumb__copysign_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // note: a duplicate `return (GrB_SUCCESS)` after the block was removed;
    // the inner block always returns, so it was unreachable dead code.
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row scale is not generated for copysign; this is the standard disabled
// placeholder. The generator's marker is "(none)" — the "(node)" spelling
// here was a garbled artifact, fixed for consistency with the other
// disabled kernels in this file.
#if 0
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Element-wise "add" over the union of patterns of A and B, optionally
// masked by M. The merge logic is in GB_add_template.c; this file only
// supplies the type and operator macros.
GrB_Info GB_AaddB__copysign_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Element-wise "multiply" over the intersection of patterns of A and B,
// optionally masked by M. The loop is in GB_emult_template.c.
GrB_Info GB_AemultB__copysign_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = copysign (x, Bx [p]) for all p, with the scalar bound to the
// first operand. Cx and Bx may alias (element-wise, same index, so safe).
GrB_Info GB_bind1st__copysign_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double bij = Bx [p] ;
        Cx [p] = copysign (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = copysign (Ax [p], y) for all p, with the scalar bound to the
// second operand. Cx and Ax may alias (element-wise, same index, so safe).
GrB_Info GB_bind2nd__copysign_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        Cx [p] = copysign (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = Ax [pA] ;                      \
    Cx [pC] = copysign (x, aij) ;               \
}

// Transpose A while applying copysign with the scalar bound first.
// GB_unop_transpose.c drives the iteration; GB_CAST_OP (above) does the
// per-entry work.
GrB_Info GB_bind1st_tran__copysign_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = Ax [pA] ;                      \
    Cx [pC] = copysign (aij, y) ;               \
}

// Transpose A while applying copysign with the scalar bound second.
// GB_unop_transpose.c drives the iteration; GB_CAST_OP (above) does the
// per-entry work.
GrB_Info GB_bind2nd_tran__copysign_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__one_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__one_uint16_uint16
// op(A') function: GB_unop_tran__one_uint16_uint16
// C type: uint16_t
// A type: uint16_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = 1 ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// The ONE operator ignores its input: Cx [p] = 1 for every p. The empty
// ";" statements the generator emits for the unused GETA/CAST steps are
// omitted here — they compile to nothing.
GrB_Info GB_unop_apply__one_uint16_uint16
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // op (cast (aij)) is the constant 1; Ax is never read
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose A while applying ONE: every entry of C becomes 1. The loop is
// in GB_unop_transpose.c, driven by the GB_CAST_OP macro defined above.
GrB_Info GB_unop_tran__one_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cudanetworkexecutor.h | #pragma once
#include "networkexecutor.h"
#include "cudanetworkbatch.h"
namespace NEAT {
//---
//--- CLASS CudaNetworkExecutor
//---
template<typename Evaluator>
class CudaNetworkExecutor : public NetworkExecutor<Evaluator> {
std::vector<class CudaNetworkBatch<Evaluator> *> batches;
public:
CudaNetworkExecutor() {
int ndevices;
xcuda( cudaGetDeviceCount(&ndevices) );
errif(ndevices == 0, "No Cuda devices found!");
batches.resize(ndevices);
for(int i = 0; i < ndevices; i++) {
batches[i] = new CudaNetworkBatch<Evaluator>(i);
}
}
virtual ~CudaNetworkExecutor() {
for(size_t i = 0; i < batches.size(); i++) {
delete batches[i];
}
}
virtual void configure(const typename Evaluator::Config *config,
size_t len) {
for(size_t i = 0; i < batches.size(); i++) {
batches[i]->configure(config, len);
}
}
virtual void execute(class Network **nets_,
OrganismEvaluation *results,
size_t nnets) {
CudaNetwork **nets = (CudaNetwork **)nets_;
size_t nbatches = batches.size();
uint batch_size = nnets / nbatches;
#pragma omp parallel for
for(size_t ibatch = 0; ibatch < nbatches; ibatch++) {
size_t inet = ibatch * batch_size;
size_t n = batch_size;
if(ibatch == nbatches - 1)
n += nnets % batch_size;
batches[ibatch]->activate(nets + inet,
results + inet,
n,
NACTIVATES_PER_INPUT);
}
}
};
    // Factory specialization: in the CUDA build, NetworkExecutor instances
    // are CUDA-backed. Caller owns the returned pointer.
    template<typename Evaluator>
    inline NetworkExecutor<Evaluator> *NetworkExecutor<Evaluator>::create() {
        return new CudaNetworkExecutor<Evaluator>();
    }
} // namespace NEAT
|
nn_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#ifndef RTABMAP_FLANN_NNINDEX_H
#define RTABMAP_FLANN_NNINDEX_H
#include <vector>
#include "rtflann/general.h"
#include "rtflann/util/matrix.h"
#include "rtflann/util/params.h"
#include "rtflann/util/result_set.h"
#include "rtflann/util/dynamic_bitset.h"
#include "rtflann/util/saving.h"
namespace rtflann
{
#define KNN_HEAP_THRESHOLD 250
/**
 * Distance-agnostic base interface for all index types: exposes the
 * non-templated operations (sizes, algorithm tag, persistence).
 */
class IndexBase
{
public:
    virtual ~IndexBase() {};

    /** Dimensionality of the stored feature vectors. */
    virtual size_t veclen() const = 0;

    /** Number of features in the index. */
    virtual size_t size() const = 0;

    /** Algorithm tag identifying the concrete index type. */
    virtual flann_algorithm_t getType() const = 0;

    /** Approximate memory used by the index, in bytes. */
    virtual int usedMemory() const = 0;

    virtual IndexParams getParameters() const = 0;

    virtual void loadIndex(FILE* stream) = 0;

    virtual void saveIndex(FILE* stream) = 0;
};
/**
* Nearest-neighbour index base class
*/
template <typename Distance>
class NNIndex : public IndexBase
{
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
    /** Construct with a distance functor and no parameters/dataset. */
    NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
            removed_(false), removed_count_(0), data_ptr_(NULL)
    {
    }

    /** Construct with index parameters and a distance functor. */
    NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
            index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL)
    {
    }
    /**
     * Copy constructor. When the source owns its data (data_ptr_ non-null),
     * the flat buffer is deep-copied and points_ is re-targeted at the new
     * buffer; otherwise points_ keeps referencing the caller's dataset.
     */
    NNIndex(const NNIndex& other) :
        distance_(other.distance_),
        last_id_(other.last_id_),
        size_(other.size_),
        size_at_build_(other.size_at_build_),
        veclen_(other.veclen_),
        index_params_(other.index_params_),
        removed_(other.removed_),
        removed_points_(other.removed_points_),
        removed_count_(other.removed_count_),
        ids_(other.ids_),
        points_(other.points_),
        data_ptr_(NULL)
    {
        if (other.data_ptr_) {
            data_ptr_ = new ElementType[size_*veclen_];
            std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_);
            // Re-point the row pointers into the freshly copied buffer.
            for (size_t i=0;i<size_;++i) {
                points_[i] = data_ptr_ + i*veclen_;
            }
        }
    }
    /** Releases the owned data buffer, if any. */
    virtual ~NNIndex()
    {
        if (data_ptr_) {
            delete[] data_ptr_;
        }
    }

    /** Polymorphic deep copy. */
    virtual NNIndex* clone() const = 0;

    /**
     * Builds the index from the already-set dataset. Lazily-removed points
     * are compacted away first, then the concrete index structure is built.
     */
    virtual void buildIndex()
    {
        freeIndex();
        cleanRemovedPoints();

        // building index
        buildIndexImpl();

        size_at_build_ = size_;
    }

    /**
     * Builds the index using the specified dataset
     * @param dataset the dataset to use
     */
    virtual void buildIndex(const Matrix<ElementType>& dataset)
    {
        setDataset(dataset);
        this->buildIndex();
    }
    /**
     * @brief Incrementally add points to the index.
     * @param points Matrix with points to be added
     * @param rebuild_threshold growth factor that triggers a full rebuild
     *        (honored by subclasses that support incremental addition)
     */
    virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
    {
        throw FLANNException("Functionality not supported by this index");
    }

    /**
     * Lazily remove a point from the index: the point is only flagged in a
     * bitset; storage is reclaimed later by cleanRemovedPoints().
     * @param id external id of the point to be removed
     */
    virtual void removePoint(size_t id)
    {
        // First removal ever: switch to id-mapping mode, where external ids
        // are translated to internal indices via ids_.
        if (!removed_) {
            ids_.resize(size_);
            for (size_t i=0;i<size_;++i) {
                ids_[i] = i;
            }
            removed_points_.resize(size_);
            removed_points_.reset();
            last_id_ = size_;
            removed_ = true;
        }

        size_t point_index = id_to_index(id);
        // Only count each point once; unknown ids are ignored.
        if (point_index!=size_t(-1) && !removed_points_.test(point_index)) {
            removed_points_.set(point_index);
            removed_count_++;
        }
    }
    /**
     * Get point with specific id
     * @param id external point id
     * @return pointer to the point's data, or NULL if the id is unknown
     */
    virtual ElementType* getPoint(size_t id)
    {
        size_t index = id_to_index(id);
        if (index!=size_t(-1)) {
            return points_[index];
        }
        else {
            return NULL;
        }
    }

    /**
     * @return number of features in this index (excluding lazily-removed
     * points that have not been compacted yet).
     */
    inline size_t size() const
    {
        return size_ - removed_count_;
    }

    /** @return number of points flagged as removed but not yet compacted. */
    inline size_t removedCount() const
    {
        return removed_count_;
    }

    /** @return number of points the index contained when it was last built. */
    inline size_t sizeAtBuild() const
    {
        return size_at_build_;
    }

    /**
     * @return The dimensionality of the features in this index.
     */
    inline size_t veclen() const
    {
        return veclen_;
    }

    /**
     * Returns the parameters used by the index.
     *
     * @return The index parameters
     */
    IndexParams getParameters() const
    {
        return index_params_;
    }
    /**
     * Boost-style bidirectional serialization: the same member, in the same
     * order, drives both saving and loading (branching on
     * Archive::is_saving / is_loading). The field order below is the file
     * format — do not reorder.
     */
    template<typename Archive>
    void serialize(Archive& ar)
    {
        IndexHeader header;

        if (Archive::is_saving::value) {
            header.h.data_type = flann_datatype_value<ElementType>::value;
            header.h.index_type = getType();
            header.h.rows = size_;
            header.h.cols = veclen_;
        }
        ar & header;

        // sanity checks
        if (Archive::is_loading::value) {
            // Compare the signature ignoring its trailing version suffix.
            if (strncmp(header.h.signature,
                        FLANN_SIGNATURE_,
                        strlen(FLANN_SIGNATURE_) - strlen("v0.0")) != 0) {
                throw FLANNException("Invalid index file, wrong signature");
            }

            if (header.h.data_type != flann_datatype_value<ElementType>::value) {
                throw FLANNException("Datatype of saved index is different than of the one to be created.");
            }

            if (header.h.index_type != getType()) {
                throw FLANNException("Saved index type is different then the current index type.");
            }
            // TODO: check for distance type
        }

        ar & size_;
        ar & veclen_;
        ar & size_at_build_;

        bool save_dataset;
        if (Archive::is_saving::value) {
            save_dataset = get_param(index_params_,"save_dataset", false);
        }
        ar & save_dataset;

        if (save_dataset) {
            if (Archive::is_loading::value) {
                // Re-own the storage and rebuild the row pointers before
                // streaming the point data in.
                if (data_ptr_) {
                    delete[] data_ptr_;
                }
                data_ptr_ = new ElementType[size_*veclen_];
                points_.resize(size_);
                for (size_t i=0;i<size_;++i) {
                    points_[i] = data_ptr_ + i*veclen_;
                }
            }
            for (size_t i=0;i<size_;++i) {
                ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType));
            }
        } else {
            if (points_.size()!=size_) {
                throw FLANNException("Saved index does not contain the dataset and no dataset was provided.");
            }
        }

        ar & last_id_;
        ar & ids_;
        ar & removed_;
        if (removed_) {
            ar & removed_points_;
        }
        ar & removed_count_;
    }
    /**
     * @brief Perform k-nearest neighbor search
     * @param[in] queries The query points for which to find the nearest neighbors
     * @param[out] indices The indices of the nearest neighbors found
     * @param[out] dists Distances to the nearest neighbors found
     * @param[in] knn Number of nearest neighbors to return
     * @param[in] params Search parameters
     * @return total number of neighbors found over all queries
     *
     * Queries are processed in parallel (params.cores OpenMP threads); each
     * thread keeps a private result set. A heap-backed result set is used
     * for large knn, a simple sorted array otherwise.
     */
    virtual int knnSearch(const Matrix<ElementType>& queries,
                          Matrix<size_t>& indices,
                          Matrix<DistanceType>& dists,
                          size_t knn,
                          const SearchParams& params) const
    {
        assert(queries.cols == veclen());
        assert(indices.rows >= queries.rows);
        assert(dists.rows >= queries.rows);
        assert(indices.cols >= knn);
        assert(dists.cols >= knn);

        bool use_heap;
        if (params.use_heap==FLANN_Undefined) {
            // Heuristic: the heap pays off only for large k.
            use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
        }
        else {
            use_heap = (params.use_heap==FLANN_True)?true:false;
        }
        int count = 0;

        if (use_heap) {
#pragma omp parallel num_threads(params.cores)
            {
                // One result set per thread, reused across its queries.
                KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    // Translate internal indices to external ids in place.
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        return count;
    }
/**
*
* @param queries
* @param indices
* @param dists
* @param knn
* @param params
* @return
*/
int knnSearch(const Matrix<ElementType>& queries,
Matrix<int>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
rtflann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
int result = knnSearch(queries, indices_, dists, knn, params);
for (size_t i=0;i<indices.rows;++i) {
for (size_t j=0;j<indices.cols;++j) {
indices[i][j] = indices_[i][j];
}
}
delete[] indices_.ptr();
return result;
}
    /**
     * @brief Perform k-nearest neighbor search
     * @param[in] queries The query points for which to find the nearest neighbors
     * @param[out] indices The indices of the nearest neighbors found
     * @param[out] dists Distances to the nearest neighbors found
     * @param[in] knn Number of nearest neighbors to return
     * @param[in] params Search parameters
     * @return total number of neighbors found over all queries
     *
     * Variant returning per-query vectors, each sized to the number of
     * neighbors actually found (can be < knn).
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector< std::vector<size_t> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen());

        bool use_heap;
        if (params.use_heap==FLANN_Undefined) {
            // Heuristic: the heap pays off only for large k.
            use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
        }
        else {
            use_heap = (params.use_heap==FLANN_True)?true:false;
        }

        if (indices.size() < queries.rows ) indices.resize(queries.rows);
        if (dists.size() < queries.rows ) dists.resize(queries.rows);

        int count = 0;
        if (use_heap) {
#pragma omp parallel num_threads(params.cores)
            {
                // One result set per thread, reused across its queries.
                KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n>0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        // Translate internal indices to external ids.
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n>0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }

        return count;
    }
    /**
     * knnSearch convenience overload writing int indices: delegates to the
     * size_t vector version, then narrows each per-query result.
     * @param queries query points
     * @param indices output neighbor indices (int)
     * @param dists output neighbor distances
     * @param knn number of neighbors to return
     * @param params search parameters
     * @return total number of neighbors found
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector< std::vector<int> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        std::vector<std::vector<size_t> > indices_;
        int result = knnSearch(queries, indices_, dists, knn, params);

        indices.resize(indices_.size());
        for (size_t i=0;i<indices_.size();++i) {
            // Element-wise size_t -> int narrowing copy.
            indices[i].assign(indices_[i].begin(), indices_[i].end());
        }
        return result;
    }
/**
* @brief Perform radius search
* @param[in] query The query point
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
int radiusSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
// per-query output capacity is bounded by the narrower of the two matrices
size_t num_neighbors = std::min(indices.cols, dists.cols);
int max_neighbors = params.max_neighbors;
if (max_neighbors<0) max_neighbors = num_neighbors;
else max_neighbors = std::min(max_neighbors,(int)num_neighbors);
if (max_neighbors==0) {
// no room to store results: only count neighbours within the radius
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
// explicitly indicated to use unbounded radius result set
// and we know there'll be enough room for resulting indices and dists
if (params.max_neighbors<0 && (num_neighbors>=size())) {
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
// count every match found, even beyond what is stored below
count += n;
if (n>num_neighbors) n = num_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
// in-place rewrite of internal indices into external point IDs
indices_to_ids(indices[i], indices[i], n);
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if ((int)n>max_neighbors) n = max_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
}
return count;
}
/**
*
* @param queries
* @param indices
* @param dists
* @param radius
* @param params
* @return
*/
int radiusSearch(const Matrix<ElementType>& queries,
Matrix<int>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
rtflann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
int result = radiusSearch(queries, indices_, dists, radius, params);
for (size_t i=0;i<indices.rows;++i) {
for (size_t j=0;j<indices.cols;++j) {
indices[i][j] = indices_[i][j];
}
}
delete[] indices_.ptr();
return result;
}
/**
* @brief Perform radius search
* @param[in] query The query point
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
// just count neighbors
if (params.max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
// grow the per-query output vectors only if they are too small
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
if (params.max_neighbors<0) {
// search for all neighbors
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
// in-place rewrite of internal indices into external point IDs
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
// count every match, even those trimmed below the cap
count += n;
if ((int)n>params.max_neighbors) n = params.max_neighbors;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
}
return count;
}
/**
*
* @param queries
* @param indices
* @param dists
* @param radius
* @param params
* @return
*/
int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
std::vector<std::vector<size_t> > indices_;
int result = radiusSearch(queries, indices_, dists, radius, params);
indices.resize(indices_.size());
for (size_t i=0;i<indices_.size();++i) {
indices[i].assign(indices_[i].begin(), indices_[i].end());
}
return result;
}
virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0;
protected:
virtual void freeIndex() = 0;
virtual void buildIndexImpl() = 0;
size_t id_to_index(size_t id)
{
    /* Map an external point ID to its internal index.
     * Returns size_t(-1) when the ID is unknown. */
    if (ids_.size()==0) {
        // no removals have occurred: IDs and indices coincide
        return id;
    }
    // fast path: the point was never shifted by a compaction
    if (id < ids_.size() && ids_[id]==id) {
        return id;
    }
    // binary search over the (sorted) ID array
    size_t lo = 0;
    size_t hi = ids_.size();
    while (lo < hi) {
        size_t probe = lo + (hi - lo) / 2;
        if (ids_[probe] == id) {
            return probe;
        }
        if (ids_[probe] < id) {
            lo = probe + 1;
        }
        else {
            hi = probe;
        }
    }
    return size_t(-1);   // not found
}
void indices_to_ids(const size_t* in, size_t* out, size_t size) const
{
    /* Translate internal point indices to external IDs; `in` and
     * `out` may alias. A no-op until a removal occurs, since the
     * two spaces are identical before then. */
    if (!removed_) {
        return;
    }
    for (size_t i = 0; i < size; ++i) {
        out[i] = ids_[in[i]];
    }
}
void setDataset(const Matrix<ElementType>& dataset)
{
    /* Replace the index's view of the data with `dataset`,
     * resetting all removal bookkeeping to a pristine state. */
    size_ = dataset.rows;
    veclen_ = dataset.cols;
    last_id_ = 0;
    removed_ = false;
    removed_count_ = 0;
    ids_.clear();
    removed_points_.clear();
    // keep one row pointer per dataset point
    points_.clear();
    points_.reserve(size_);
    for (size_t i = 0; i < size_; ++i) {
        points_.push_back(dataset[i]);
    }
}
// Append the rows of new_points to the index's point list.
void extendDataset(const Matrix<ElementType>& new_points)
{
size_t new_size = size_ + new_points.rows;
if (removed_) {
// removal bookkeeping is only maintained once a removal has occurred
removed_points_.resize(new_size);
ids_.resize(new_size);
}
points_.resize(new_size);
for (size_t i=size_;i<new_size;++i) {
points_[i] = new_points[i-size_];
if (removed_) {
// each new point gets a fresh sequential ID and starts un-removed
ids_[i] = last_id_++;
removed_points_.reset(i);
}
}
size_ = new_size;
}
// Compact points_/ids_ in place, dropping entries flagged as removed.
void cleanRemovedPoints()
{
if (!removed_) return;
size_t last_idx = 0;
for (size_t i=0;i<size_;++i) {
if (!removed_points_.test(i)) {
// keep this point: shift it down into the next free slot
points_[last_idx] = points_[i];
ids_[last_idx] = ids_[i];
removed_points_.reset(last_idx);
++last_idx;
}
}
points_.resize(last_idx);
ids_.resize(last_idx);
removed_points_.resize(last_idx);
size_ = last_idx;
removed_count_ = 0;
// NOTE(review): removed_ intentionally stays true here, presumably so
// the ids_ mapping keeps being maintained after compaction — confirm
}
void swap(NNIndex& other)
{
    /* Exchange the complete index state with `other`, member by member. */
    std::swap(distance_, other.distance_);
    std::swap(index_params_, other.index_params_);
    // dataset geometry
    std::swap(size_, other.size_);
    std::swap(size_at_build_, other.size_at_build_);
    std::swap(veclen_, other.veclen_);
    // removal bookkeeping
    std::swap(removed_, other.removed_);
    std::swap(removed_count_, other.removed_count_);
    std::swap(removed_points_, other.removed_points_);
    std::swap(last_id_, other.last_id_);
    std::swap(ids_, other.ids_);
    // point storage
    std::swap(points_, other.points_);
    std::swap(data_ptr_, other.data_ptr_);
}
protected:
/**
* The distance functor
*/
Distance distance_;
/**
* Each index point has an associated ID. IDs are assigned sequentially in
* increasing order. This indicates the ID assigned to the last point added to the
* index.
*/
size_t last_id_;
/**
* Number of points in the index (and database)
*/
size_t size_;
/**
* Number of features in the dataset when the index was last built.
*/
size_t size_at_build_;
/**
* Size of one point in the index (and database)
*/
size_t veclen_;
/**
* Parameters of the index.
*/
IndexParams index_params_;
/**
* Flag indicating if at least a point was removed from the index
*/
bool removed_;
/**
* Array used to mark points removed from the index
*/
DynamicBitset removed_points_;
/**
* Number of points removed from the index
*/
size_t removed_count_;
/**
* Array of point IDs, returned by nearest-neighbour operations
*/
std::vector<size_t> ids_;
/**
* Point data
*/
std::vector<ElementType*> points_;
/**
* Pointer to dataset memory if allocated by this index, otherwise NULL
*/
ElementType* data_ptr_;
};
// Convenience macro: re-exports the protected members and helpers of the
// dependent base class NNIndex<Distance> into a derived index class, so
// derived templates can use them without this-> qualification.
#define USING_BASECLASS_SYMBOLS \
using NNIndex<Distance>::distance_;\
using NNIndex<Distance>::size_;\
using NNIndex<Distance>::size_at_build_;\
using NNIndex<Distance>::veclen_;\
using NNIndex<Distance>::index_params_;\
using NNIndex<Distance>::removed_points_;\
using NNIndex<Distance>::ids_;\
using NNIndex<Distance>::removed_;\
using NNIndex<Distance>::points_;\
using NNIndex<Distance>::extendDataset;\
using NNIndex<Distance>::setDataset;\
using NNIndex<Distance>::cleanRemovedPoints;\
using NNIndex<Distance>::indices_to_ids;
}
#endif //FLANN_NNINDEX_H
|
GB_binop__rdiv_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fc64)
// A*D function (colscale): GB (_AxD__rdiv_fc64)
// D*A function (rowscale): GB (_DxB__rdiv_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fc64)
// C=scalar+B GB (_bind1st__rdiv_fc64)
// C=scalar+B' GB (_bind1st_tran__rdiv_fc64)
// C=A+scalar GB (_bind2nd__rdiv_fc64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_div (bij, aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_div (y, x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_fc64)
(
GrB_Matrix C, // input/output, all three matrices dense
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// no GB_DISABLE guard: per the note above, only the supported
// dense-accum operators (MIN..RDIV) dispatch here
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fc64)
(
GrB_Matrix C, // output, all three matrices dense
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ; // operator compiled out; caller uses generic path
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_fc64)
(
GrB_Matrix C, // dense input/output accumulator
const GrB_Matrix B, // sparse matrix accumulated into C
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ; // operator compiled out; caller uses generic path
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_fc64)
(
GrB_Matrix C, // dense input/output accumulator
const GB_void *p_bwork, // typeless pointer to the scalar b
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block already returned
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_fc64)
(
GrB_Matrix C, // output: A with columns scaled by diag(D)
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern, // diagonal scaling matrix
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_fc64)
(
GrB_Matrix C, // output: B with rows scaled by diag(D)
const GrB_Matrix D, bool D_is_pattern, // diagonal scaling matrix
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_fc64)
(
GrB_Matrix C, // eWiseAdd output
const int C_sparsity,
const GrB_Matrix M, // mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspaces for M, A, B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_fc64)
(
GrB_Matrix C, // eWiseMult output, sparse/hypersparse
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M, // mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_fc64)
(
GrB_Matrix C, // eWiseMult output: A sparse/hyper, B bitmap/full
const GrB_Matrix M, // mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy, // true if operands arrive in flipped order
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_fc64)
(
GrB_Matrix C, // eWiseMult output: M sparse/hyper, A and B bitmap/full
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_fc64)
(
GrB_Matrix C, // eWiseMult output, bitmap
const int ewise_method,
const GrB_Matrix M, // mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input, // scalar bound as the first operand
const GB_void *Bx_input,
const int8_t *restrict Bb, // bitmap of B, or NULL if B is not bitmap
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ; // skip entries absent from the bitmap
GxB_FC64_t bij = GBX (Bx, p, false) ;
// rdiv with x bound first: cij = GB_FC64_div (bij, x)
Cx [p] = GB_FC64_div (bij, x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input, // scalar bound as the second operand
const int8_t *restrict Ab, // bitmap of A, or NULL if A is not bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ; // skip entries absent from the bitmap
GxB_FC64_t aij = GBX (Ax, p, false) ;
// rdiv with y bound second: cij = GB_FC64_div (y, aij)
Cx [p] = GB_FC64_div (y, aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_div (aij, x) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_fc64)
(
GrB_Matrix C, // output: op (x, A')
const GB_void *x_input, // scalar bound as the first operand
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_div (y, aij) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_fc64)
(
GrB_Matrix C, // output: op (A', y)
const GrB_Matrix A,
const GB_void *y_input, // scalar bound as the second operand
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__asin_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asin_fc32_fc32)
// op(A') function: GB (_unop_tran__asin_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = casinf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = casinf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = casinf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__asin_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// no bitmap: apply casinf to every one of the anz entries
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ; // no typecast: A and C are both FC32
Cx [p] = casinf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // entry not present in the bitmap
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = casinf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__asin_fc32_fc32)
(
GrB_Matrix C, // output: casinf applied to A transposed
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// transpose + apply handled by the shared template,
// driven by the GB_CAST_OP macro defined above
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
commondraw.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : commondraw.c
* Description : common drawing
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_commondraw_c__
#define __libaroma_commondraw_c__
#include <aroma_internal.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Function : libaroma_draw_limit
* Return Value: int
* Descriptions: get limit position
*/
int libaroma_draw_limit(
    int x, int max) {
  /* clamp x into the valid coordinate range [0, max-1];
   * negative coordinates snap to 0 first */
  int limited = x;
  if (limited < 0) {
    limited = 0;
  }
  else if (limited >= max) {
    limited = max - 1;
  }
  return limited;
} /* End of libaroma_draw_limit */
/*
* Function : libaroma_draw_limited
* Return Value: byte
* Descriptions: is draw position limited/overflow
*/
byte libaroma_draw_limited(
    int x, int max) {
  /* 1 when x falls outside [0, max), 0 when it is a valid position */
  if (x < 0) {
    return 1;
  }
  return (x >= max) ? 1 : 0;
} /* End of libaroma_draw_limited */
/*
* Function : libaroma_draw_ex2
* Return Value: byte
* Descriptions: canvas drawing
*/
byte libaroma_draw_ex2(
LIBAROMA_CANVASP dst,
LIBAROMA_CANVASP src,
int dx, int dy,
int sx, int sy,
int sw, int sh,
byte draw_flags,
byte opacity,
byte ismask,
word maskcolor
) {
if (src == NULL) {
/* NOTE(review): the log messages in this function still say "ex1" —
 * stale copy/paste from the previous revision of this routine */
ALOGW("libaroma_draw_ex1 src = NULL");
return 0;
}
if (dst == NULL) {
/* NULL destination means the framebuffer canvas */
dst = libaroma_fb()->canvas;
}
if ((dx >= dst->w) || (dy >= dst->h)) {
ALOGW("libaroma_draw_ex1 dx/dy bigger that destination size");
return 0;
}
if (opacity==0) {
return 1; /* No Need Any Process */
}
/* decode draw flags */
byte useAlpha = (draw_flags&LIBAROMA_DRAW_WITH_ALPHA)?1:0;
byte noDither = (draw_flags&LIBAROMA_DRAW_NODITHER)?1:0;
byte toBlack = (draw_flags&LIBAROMA_DRAW_TO_BLACK)?1:0;
/* fix positions */
if (sx < 0) {
dx += abs(sx);
sw -= abs(sx);
sx = 0;
}
if (sy < 0) {
dy += abs(sy);
sh -= abs(sy);
sy = 0;
}
/* fix size */
if (sw + sx >= src->w) {
sw -= (sw + sx) - src->w;
}
if (sh + sy >= src->h) {
sh -= (sh + sy) - src->h;
}
if ((sw <= 0) || (sh <= 0)) {
ALOGW("libaroma_draw_ex1 calculated sw/sh < 1");
return 0;
}
/* set calculated units */
int sr_w = sw;
int sr_h = sh;
int sr_x = sx;
int sr_y = sy;
int ds_x = dx;
int ds_y = dy;
/* fix destination */
if (dx < 0) {
int ndx = abs(dx);
sr_x += abs(ndx);
sr_w -= ndx;
ds_x = 0;
}
if (dy < 0) {
int ndy = abs(dy);
sr_y += ndy;
sr_h -= ndy;
ds_y = 0;
}
/* fix source size */
if (sr_w + dx > dst->w) {
sr_w -= (sr_w + dx) - dst->w;
}
if (sr_h + dy > dst->h) {
sr_h -= (sr_h + dy) - dst->h;
}
/* prepare loop data; canvas pixels are 16-bit, hence the *2 byte offsets */
int y;
int pos_sr_x = sr_x * 2;
int pos_ds_x = ds_x * 2;
int pos_sc_w = src->l * 2;
int pos_dc_w = dst->l * 2;
int copy_sz = sr_w * 2;
byte * src_data = ((byte *) src->data);
byte * dst_data = ((byte *) dst->data);
if (useAlpha) {
if (src->alpha == NULL) {
/* alpha blending requested but source has no alpha channel */
useAlpha = 0;
}
}
if (!useAlpha){
/* mask drawing requires the source alpha channel */
ismask=0;
}
if (opacity == 0xff) {
/* fully opaque: blend per-pixel alpha or copy rows directly */
if (useAlpha) {
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
wordp dst_mem = (wordp) (dst_data+((ds_y + y)*pos_dc_w)+pos_ds_x);
if (ismask){
libaroma_alpha_mono(
sr_w, dst_mem, dst_mem, maskcolor,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
}
else{
wordp src_mem = (wordp) (src_data+((sr_y + y)*pos_sc_w)+pos_sr_x);
if (noDither){
libaroma_alpha_px(
sr_w, dst_mem, dst_mem,
src_mem, (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
}
else{
libaroma_alpha_px_line(
y, sr_w, dst_mem, dst_mem,
src_mem, (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
}
}
}
}
else {
/* Copy Data Directly */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
memcpy(
dst_data + ((ds_y + y)*pos_dc_w) + pos_ds_x,
src_data + ((sr_y + y)*pos_sc_w) + pos_sr_x,
copy_sz
);
}
}
}
else {
/* translucent: blend through a temporary row, then apply opacity */
if (useAlpha) {
/* Blend Destination with Source */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
/* NOTE(review): per-row malloc inside the parallel loop and no
 * NULL check on the result — consider a preallocated buffer */
wordp tmp_dst = (wordp) malloc(sr_w * 2);
wordp dst_mem = (wordp) (dst_data + ((ds_y + y) * pos_dc_w) + pos_ds_x);
if (ismask){
libaroma_alpha_mono(
sr_w, tmp_dst, dst_mem, maskcolor,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_const(
sr_w, dst_mem, dst_mem, tmp_dst, opacity
);
}
else{
wordp src_mem = (wordp) (src_data+((sr_y + y)*pos_sc_w)+pos_sr_x);
if (toBlack){
libaroma_alpha_px(
sr_w, tmp_dst, dst_mem, src_mem,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_black(sr_w, dst_mem, tmp_dst, opacity);
}
else if (noDither){
libaroma_alpha_px(
sr_w, tmp_dst, dst_mem, src_mem,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_const(
sr_w, dst_mem, dst_mem, tmp_dst, opacity
);
}
else{
libaroma_alpha_px_line(
y, sr_w, tmp_dst, dst_mem, src_mem,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_const_line(
y, sr_w, dst_mem, dst_mem, tmp_dst, opacity
);
}
}
free(tmp_dst);
}
}
else {
/* Blend Data Directly */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
wordp dst_mem = (wordp) (dst_data + ((ds_y + y) * pos_dc_w) + pos_ds_x);
wordp src_mem = (wordp) (src_data + ((sr_y + y) * pos_sc_w) + pos_sr_x);
if (toBlack){
libaroma_alpha_black(sr_w, dst_mem, src_mem, opacity);
}
else if (noDither){
libaroma_alpha_const(
sr_w, dst_mem, dst_mem, src_mem, opacity
);
}
else{
libaroma_alpha_const_line(
y, sr_w, dst_mem, dst_mem, src_mem, opacity
);
}
}
}
}
return 1;
} /* End of libaroma_draw_ex2 */
/*
* Function : libaroma_draw_rect
* Return Value: byte
* Descriptions: draw rectangle
*/
byte libaroma_draw_rect(
LIBAROMA_CANVASP dst,
int x, int y, int w, int h,
word color, byte alpha) {
if (dst == NULL) {
/* NULL destination means the framebuffer canvas */
dst = libaroma_fb()->canvas;
}
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
/* fix position */
int x2 = x + w;
int y2 = y + h;
if (x2 > dst->w) {
x2 = dst->w;
}
if (y2 > dst->h) {
y2 = dst->h;
}
/* fixed size */
/* NOTE(review): if x or y starts beyond the canvas, w/h go negative
 * here; presumably callers pre-clip — confirm */
w = x2 - x;
h = y2 - y;
/* draw */
int dy;
if (alpha == 0xff) {
/* fully opaque: fill each line with the solid color */
wordp datapos = dst->data + x;
#ifdef libaroma_memset16
for (dy = y; dy < y2; dy++) {
wordp linepos = datapos + (dy * dst->l);
libaroma_color_set(linepos,color,w);
}
#else
/* no fast 16-bit memset: fill the first line, then memcpy it down */
int w2=w*2;
wordp firstline = datapos + (y * dst->l);
libaroma_color_set(firstline, color, w);
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (dy = y+1; dy < y2; dy++) {
wordp linepos = datapos + (dy * dst->l);
memcpy(linepos,firstline,w2);
}
#endif
}
else {
/* translucent: blend each line with the constant alpha */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (dy = y; dy < y2; dy++) {
wordp linepos = dst->data + (dy * dst->l) + x;
#ifdef __engine_have_libaroma_alpha_rgba_fill
libaroma_alpha_rgba_fill_line(dy, w, linepos, linepos, color, alpha);
#else
libaroma_alpha_rgba_fill(w, linepos, linepos, color, alpha);
#endif
}
}
return 1;
} /* End of libaroma_draw_rect */
/*
* Function : libaroma_draw_pixel
* Return Value: byte
* Descriptions: draw pixel
*/
/* Plot one pixel onto the canvas: opaque writes overwrite, translucent
 * writes are blended with the existing pixel. Returns 0 when the
 * coordinate is outside the canvas, 1 otherwise. */
byte libaroma_draw_pixel(
  LIBAROMA_CANVASP dest,
  int dx, int dy,
  word color,
  byte alpha
){
  if (dest==NULL){
    dest=libaroma_fb()->canvas;
  }
  /* reject coordinates outside the canvas */
  if ((dx>=0)&&(dy>=0)&&(dx<dest->w)&&(dy<dest->h)){
    wordp target = dest->data + (dest->l * dy) + dx;
    if (alpha==0xff){
      /* fully opaque: overwrite directly */
      *target = color;
    }
    else if (alpha>0){
      /* translucent: blend with the existing pixel */
      *target = libaroma_alpha(*target, color, alpha);
    }
    return 1;
  }
  return 0;
} /* End of libaroma_draw_pixel */
/*
* Function : libaroma_draw_alphapixel
* Return Value: byte
* Descriptions: set alpha pixel
*/
/* Write one value into the canvas alpha plane. Returns 0 when the
 * coordinate is outside the canvas or the canvas has no alpha plane. */
byte libaroma_draw_alphapixel(
  LIBAROMA_CANVASP dest,
  int dx, int dy,
  byte alpha
){
  if (dest==NULL){
    dest=libaroma_fb()->canvas;
  }
  /* reject out-of-bounds coordinates */
  if ((dx<0)||(dy<0)||(dx>=dest->w)||(dy>=dest->h)){
    return 0;
  }
  /* the canvas must carry an alpha plane */
  if (dest->alpha==NULL){
    return 0;
  }
  dest->alpha[dest->l * dy + dx] = alpha;
  return 1;
} /* End of libaroma_draw_alphapixel */
/*
* Function : libaroma_draw_line
* Return Value: byte
* Descriptions: draw line
*/
byte libaroma_draw_line(
  LIBAROMA_CANVASP dest,
  int x0, int y0, int x1, int y1,
  float wd,
  word color,
  byte alpha,
  byte is_mask){
  /* Anti-aliased thick line plotter: Bresenham stepping with an
   * error-distance coverage term (same structure as A. Zingl's
   * plotLineWidth). __DRAW_PIX(x,y,a) plots one pixel where 'a' is the
   * coverage falloff (<=0 fully covered, >=1 outside the line):
   *   is_mask==1 : write 'alpha' faded by coverage into the alpha plane
   *   is_mask==2 : write the raw coverage (255*a) into the alpha plane
   *   otherwise  : blend 'color' onto the pixel data with faded 'alpha'
   * The pixel helpers return 0 when out of bounds, which breaks out of
   * the enclosing loop. */
  #define __DRAW_PIX(x,y,a) \
    if (is_mask==1){ \
      if (!libaroma_draw_alphapixel( \
          dest, x, y, \
          MIN(alpha,MAX(0, alpha * (1-(a)))) \
        )) { break; } \
    } \
    else if (is_mask==2){ \
      if (!libaroma_draw_alphapixel( \
          dest, x, y, \
          MIN(0xff,MAX(0, 255 * (a))) \
        )) { break; } \
    } \
    else{ \
      if (!libaroma_draw_pixel( \
          dest, x, y, color, \
          MIN(0xff,MAX(0, alpha * (1-(a)))) \
        )) { break; } \
    }
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  int dx = abs(x1-x0), sx = x0 < x1 ? 1 : -1;
  int dy = abs(y1-y0), sy = y0 < y1 ? 1 : -1;
  int err = dx-dy, e2, x2, y2;
  /* ed = euclidean length of the delta; 1 avoids div-by-zero for a point */
  float ed = dx+dy == 0 ? 1 : sqrt((float)dx*dx+(float)dy*dy);
  /* wd becomes the half-width used by the coverage term */
  for (wd = (wd+1)/2; ; ) {
    /* pixel on the ideal line */
    if ((x0>=0)&&(y0>=0)){
      __DRAW_PIX(x0,y0,
        abs(err-dx+dy)/ed-wd+1
      );
    }
    e2 = err; x2 = x0;
    if (2*e2 >= -dx) {
      /* x-major step: fill perpendicular pixels above/below the line */
      for (e2 += dy, y2 = y0; e2 < ed*wd && (y1 != y2 || dx > dy); e2 += dx){
        if ((x0>=0)&&(y2>=0)){
          __DRAW_PIX(x0, y2+=sy,
            abs(e2)/ed-wd+1
          );
        }
      }
      if (x0==x1){
        break;
      }
      e2 = err; err -= dy; x0 += sx;
    }
    if (2*e2 <= dy){
      /* y-major step: fill perpendicular pixels left/right of the line */
      for (e2 = dx-e2; e2 < ed*wd && (x1 != x2 || dx < dy); e2 += dy){
        if ((x2>=0)&&(y0>=0)){
          __DRAW_PIX(x2 += sx, y0,
            abs(e2)/ed-wd+1
          );
        }
      }
      if (y0==y1){
        break;
      }
      err += dx; y0 += sy;
    }
  }
  #undef __DRAW_PIX
  return 1;
} /* End of libaroma_draw_line */
/*
* Function : libaroma_draw_subpixel
* Return Value: byte
* Descriptions: draw subpixel
*/
/* Draw an anti-aliased point at fractional coordinates (dx,dy) with the
 * given thickness; per-pixel coverage falls off linearly with the
 * manhattan distance from the center. */
byte libaroma_draw_subpixel(
  LIBAROMA_CANVASP dest,
  float dx, float dy, float tickness,
  word color,
  byte alpha){
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  if ((dx<=-1)||(dy<=-1)||(dy>=dest->h)||(dx>=dest->w)){
    return 0;
  }
  int x, y;
  float px, py;
  /* half of (tickness-1).
   * NOTE(review): ht==0 when tickness==1, which makes the divisions below
   * 0/0 or x/0 -- presumably callers always pass tickness>1; confirm
   * before relying on tickness<=1. */
  float ht=(tickness-1.0)/2;
  for (y=floor(dy-ht);y<=ceil(dy+ht);y++){
    if ((y>=0)&&(y<dest->h)){
      int pos = y * dest->l;
      for (x=floor(dx-ht);x<=ceil(dx+ht);x++){
        if ((x>=0)&&(x<dest->w)){
          /* BUGFIX: use fabsf() for the fractional distances. The original
           * called the integer abs() on float operands, truncating dx-x /
           * dy-y to whole pixels and quantizing the anti-alias coverage. */
          px = fabsf(dx-x)/ht;
          py = fabsf(dy-y)/ht;
          int alp = MIN(0xff,MAX((1-(px+py)) * 0xff,0));
          wordp d = dest->data + pos + x;
          word cl = libaroma_alpha(*d, color, alp);
          if (alpha!=0xff){
            /* apply the caller's global alpha on top of the coverage */
            cl=libaroma_alpha(*d,cl,alpha);
          }
          *d=cl;
        }
      }
    }
  }
  return 1;
} /* End of libaroma_draw_subpixel */
/*
* Function : libaroma_draw_mask_circle
* Return Value: byte
* Descriptions: draw masked circle
*/
byte libaroma_draw_mask_circle(
  LIBAROMA_CANVASP dst,
  LIBAROMA_CANVASP src,
  int dx, int dy,
  int sx, int sy,
  int sz,
  byte alpha){
  /* Copy a filled circle of diameter sz from src (centered at sx,sy) onto
   * dst (centered at dx,dy), scanline by scanline, optionally blended
   * with the constant 'alpha'. Returns 0 only when src is NULL. */
  if (dst == NULL) {
    dst = libaroma_fb()->canvas;
  }
  if (src == NULL) {
    return 0;
  }
  if (sz<2){
    /* degenerate circle: nothing visible to draw */
    return 1;
  }
  int radius = sz/2;
  int rad2 = radius * radius;
  int y;
  /* one scanline per iteration; y is relative to the circle center */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for(y=-radius; y<=radius; y++){
    int pdy = dy + y;
    int psy = sy + y;
    if ((pdy<dst->h)&&(pdy>=0)&&(psy<src->h)&&(psy>=0)){
      int pos_d = pdy * dst->l;
      int pos_s = psy * src->l;
      /* half-chord length of the circle at this scanline */
      int x = sqrt(rad2-y*y);
      int w = x*2;
      /* clip against the left edges of src and dst.
       * NOTE(review): the second test reuses the x already clamped by the
       * first one, so the two clips interact -- confirm this matches the
       * intended behavior when only one of sx/dx is near an edge. */
      if (sx-x<0){
        w-=abs(sx-x);
        x=sx;
      }
      if (dx-x<0){
        w-=abs(dx-x);
        x=dx;
      }
      int pdx = dx-x;
      int sdx = sx-x;
      /* clip against the right edges */
      if (sdx+w>src->w){
        w=src->w-sdx;
      }
      if (pdx+w>dst->w){
        w=dst->w-pdx;
      }
      if (w>0){
        wordp dd = dst->data + pos_d + pdx;
        wordp sd = src->data + pos_s + sdx;
        if (alpha==0xff){
          /* opaque: raw row copy (2 bytes per 16-bit pixel) */
          memcpy(dd,sd,w*2);
        }
        else{
          //libaroma_alpha_const_line(pdy,w,dd,dd,sd,alpha);
          libaroma_alpha_const(w,dd,dd,sd,alpha);
        }
      }
    }
  }
  return 1;
} /* End of libaroma_draw_mask_circle */
/*
* Function : libaroma_draw_circle
* Return Value: byte
* Descriptions: draw filled circle
*/
byte libaroma_draw_circle(
  LIBAROMA_CANVASP dst,
  word color,
  int dx, int dy,
  int sz,
  byte alpha){
  /* Draw a filled circle of diameter sz centered at (dx,dy), clipped to
   * the canvas, filled scanline by scanline with 'color' at 'alpha'. */
  if (dst == NULL) {
    dst = libaroma_fb()->canvas;
  }
  if (sz<2){
    /* degenerate circle: nothing visible to draw */
    return 1;
  }
  int radius = sz/2;
  int rad2 = radius * radius;
  int y;
  /* one scanline per iteration; y is relative to the circle center */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for(y=-radius; y<=radius; y++){
    int pdy = dy + y;
    if ((pdy<dst->h)&&(pdy>=0)){
      int pos_d = pdy * dst->l;
      /* half-chord length of the circle at this scanline */
      int x = sqrt(rad2-y*y);
      int w = x*2;
      /* clip against the left edge */
      if (dx-x<0){
        w-=abs(dx-x);
        x=dx;
      }
      int pdx = dx-x;
      /* clip against the right edge */
      if (pdx+w>dst->w){
        w=dst->w-pdx;
      }
      if (w>0){
        wordp dd = dst->data + pos_d + pdx;
        if (alpha==0xff){
          /* opaque: plain row fill */
          libaroma_color_set(dd,color,w);
        }
        else{
#ifdef __engine_have_libaroma_alpha_rgba_fill
          libaroma_alpha_rgba_fill_line(pdy,w,dd, dd,color,alpha);
#else
          libaroma_alpha_rgba_fill(w,dd, dd,color,alpha);
#endif
        }
      }
    }
  }
  return 1;
} /* End of libaroma_draw_circle */
/*
* Function : libaroma_draw_line_width
* Return Value: byte
* Descriptions: draw line with width
*/
byte libaroma_draw_line_width(
  LIBAROMA_CANVASP dest,
  float x1, float y1, float x2, float y2,
  float wd,
  word color,
  byte alpha,
  byte is_mask,
  float aliasing){
  /* Draw a thick line as a filled quad via the path rasterizer: the quad
   * corners are the endpoints offset perpendicular to the line direction
   * by wd/2 on each side. */
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  if ((is_mask)&&(dest->alpha==NULL)){
    /* mask drawing requires an alpha plane */
    return 0;
  }
  if ((!is_mask)&&(alpha<1)){
    /* fully transparent color draw: nothing to do */
    return 1;
  }
  float angle = atan2(y2 - y1, x2 - x1);
  /* NOTE(review): the *1 and *2 offsets are computed identically, and the
   * corner (x2 - t2sina2, y2 + t2cosa2) is added twice below -- looks like
   * copy/paste residue; presumably harmless for a filled path, confirm
   * against libaroma_path_draw. */
  float t2sina1 = wd / 2 * sin(angle);
  float t2cosa1 = wd / 2 * cos(angle);
  float t2sina2 = wd / 2 * sin(angle);
  float t2cosa2 = wd / 2 * cos(angle);
  LIBAROMA_PATHP path=libaroma_path(x1 + t2sina1, y1 - t2cosa1);
  libaroma_path_add(path, x2 + t2sina2, y2 - t2cosa2);
  libaroma_path_add(path, x2 - t2sina2, y2 + t2cosa2);
  libaroma_path_add(path, x2 - t2sina2, y2 + t2cosa2);
  libaroma_path_add(path, x1 - t2sina1, y1 + t2cosa1);
  libaroma_path_add(path, x1 + t2sina1, y1 - t2cosa1);
  byte res=libaroma_path_draw(
    dest,
    path,
    color,
    alpha,
    is_mask,
    aliasing);
  libaroma_path_free(path);
  return res;
} /* End of libaroma_draw_line_width */
/*
* Function : _libaroma_draw_arc_findpoint
* Return Value: byte
* Descriptions: find arc point
*/
/* Recursively bisect the elliptical arc between (xt0,yt0)@start and
 * (xt1,yt1)@end, adding intermediate points to 'path' until adjacent
 * samples are less than 2px apart in both axes. */
byte _libaroma_draw_arc_findpoint(
  LIBAROMA_PATHP path,
  float dx, float dy,
  float radius_w, float radius_h,
  float xt0, float yt0,
  float xt1, float yt1,
  double start, double end
){
  double radian;
  if (start==end){
    /* zero-length arc: nothing to subdivide */
    return 0;
  }
  else if (start<end){
    radian = start + ((end - start) / 2.0);
  }
  else{
    radian = end + ((start - end) / 2.0);
  }
  /* midpoint sample on the ellipse */
  float xt = dx + radius_w*cos(radian);
  float yt = dy + radius_h*sin(radian);
  /* BUGFIX: compare the distances in floating point with fabsf(). The
   * original used the integer abs(), implicitly converting the float
   * operands to int (truncation; undefined for huge values) -- correct
   * here only by accident of the truncation semantics. */
  if ((fabsf(xt-xt0)>=2)||(fabsf(yt-yt0)>=2)) {
    /* left half still too coarse: recurse */
    _libaroma_draw_arc_findpoint(
      path, dx, dy, radius_w, radius_h,
      xt0, yt0, xt, yt,
      start, radian
    );
  }
  libaroma_path_add(path, xt, yt);
  if ((fabsf(xt-xt1)>=2)||(fabsf(yt-yt1)>=2)) {
    /* right half still too coarse: recurse */
    _libaroma_draw_arc_findpoint(
      path, dx, dy, radius_w, radius_h,
      xt, yt, xt1, yt1,
      radian, end
    );
  }
  libaroma_path_add(path, xt1, yt1);
  return 1;
} /* End of _libaroma_draw_arc_findpoint */
/*
* Function : libaroma_draw_arc
* Return Value: byte
* Descriptions: draw arc into canvas
*/
byte libaroma_draw_arc(
  LIBAROMA_CANVASP dest,
  float dx, float dy,
  float radius_w, float radius_h,
  float width,
  float start_angle, float end_angle,
  word color,byte alpha,byte is_mask,float aliasing
){
  /* Draw an (elliptical) arc centered at (dx,dy) between start_angle and
   * end_angle (degrees). With a valid stroke 'width' the path becomes a
   * closed ring segment (outer edge out, inner edge back); otherwise a
   * filled pie-slice-like polygon is rasterized. */
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  if ((is_mask)&&(dest->alpha==NULL)){
    /* mask drawing requires an alpha plane */
    return 0;
  }
  if ((!is_mask)&&(alpha<1)){
    /* fully transparent color draw: nothing to do */
    return 1;
  }
  if (start_angle==end_angle){
    /* no draw needed */
    return 1;
  }
  /*
  start_angle=fmod(start_angle,360);
  end_angle=fmod(end_angle,360);
  */
  /*
  start_angle=360-start_angle;
  end_angle=360-end_angle;
  */
  /* normalize so start_angle < end_angle */
  if (start_angle>end_angle){
    float tmp=start_angle;
    start_angle=end_angle;
    end_angle=tmp;
  }
  /* degrees -> radians, then compute the two arc endpoints */
  double start_radian = start_angle* __PI / 180.0;
  double end_radian = end_angle * __PI / 180.0;
  float start_x = dx + radius_w*cos(start_radian);
  float start_y = dy + radius_h*sin(start_radian);
  float end_x = dx + radius_w*cos(end_radian);
  float end_y = dy + radius_h*sin(end_radian);
  /* outer edge: adaptively subdivided polyline from start to end */
  LIBAROMA_PATHP path=libaroma_path(start_x, start_y);
  _libaroma_draw_arc_findpoint(
    path, dx, dy, radius_w, radius_h,
    start_x, start_y, end_x, end_y,
    start_radian, end_radian
  );
  libaroma_path_add(path, end_x, end_y);
  if ((width>0)&&(width<radius_w/2)&&(width<radius_h/2)) {
    /* stroked arc: walk back along the inner radius so the path closes
     * into a ring segment */
    radius_w -= width;
    radius_h -= width;
    /* roll */
    start_x = dx + radius_w*cos(end_radian);
    start_y = dy + radius_h*sin(end_radian);
    end_x = dx + radius_w*cos(start_radian);
    end_y = dy + radius_h*sin(start_radian);
    libaroma_path_add(path, start_x, start_y);
    _libaroma_draw_arc_findpoint(
      path, dx, dy, radius_w, radius_h,
      start_x, start_y, end_x, end_y,
      end_radian, start_radian
    );
  }
  byte res=libaroma_path_draw(
    dest,
    path,
    color,
    alpha,
    is_mask,
    aliasing);
  libaroma_path_free(path);
  return res;
} /* End of libaroma_draw_arc */
#ifdef __cplusplus
}
#endif
#endif /* __libaroma_commondraw_c__ */
|
convolutiondepthwise_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise convolution, 3x3 kernel, stride 1. Each channel (group) is
// convolved independently with its own 9 weights; channels are processed
// in parallel. The pointer-advance arithmetic assumes outw == w - 2
// (valid 3x3 convolution, no padding).
static void convdw3x3s1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        const float bias0 = bias ? bias[g] : 0.f;
        // 9 weights for this channel; k0/k1/k2 below are its three rows
        const float* kernel0 = kernel + g * 9;
        // two output rows are produced per iteration of the main loop
        float* outptr0 = out;
        float* outptr1 = outptr0 + outw;
        const float* img0 = bottom_blob.channel(g);
        // four input rows feed two output rows (rows i and i+1)
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;
        const float* r3 = img0 + w * 3;
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;
        int i = 0;
        for (; i + 1 < outh; i += 2)
        {
            for (int j = 0; j < outw; j++)
            {
                // sum  -> output row i   (input rows r0,r1,r2)
                // sum2 -> output row i+1 (input rows r1,r2,r3)
                float sum = bias0;
                float sum2 = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];
                *outptr0 = sum;
                *outptr1 = sum2;
                r0++;
                r1++;
                r2++;
                r3++;
                outptr0++;
                outptr1++;
            }
            // skip the 2-pixel right border plus one whole row: two output
            // rows consumed two input rows (r0 ends at img0+(i+2)*w)
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;
            // output pointers leapfrog over the row the other one wrote
            outptr0 += outw;
            outptr1 += outw;
        }
        // tail: odd remaining output row, computed one row at a time
        for (; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                *outptr0 = sum;
                r0++;
                r1++;
                r2++;
                outptr0++;
            }
            // skip the 2-pixel right border to reach the next input row
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}
// Depthwise convolution, 3x3 kernel, stride 2. Each channel (group) is
// convolved independently with its own 9 weights; channels are processed
// in parallel.
static void convdw3x3s2_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int group = bottom_blob.c;
    // after consuming outw pixels at stride 2, jump to the start of the
    // next input row pair
    const int tailstep = w - 2 * outw + w;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        const float bias0 = bias ? bias[g] : 0.f;
        // 9 weights for this channel, laid out as three rows of three
        const float* kptr = kernel + g * 9;
        float* outptr = out;
        const float* img0 = bottom_blob.channel(g);
        // three input rows feed one output row
        const float* row0 = img0;
        const float* row1 = img0 + w;
        const float* row2 = img0 + w * 2;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // accumulate bias + 3x3 window dot product
                // (term order matches the reference implementation)
                float sum = bias0
                            + row0[0] * kptr[0] + row0[1] * kptr[1] + row0[2] * kptr[2]
                            + row1[0] * kptr[3] + row1[1] * kptr[4] + row1[2] * kptr[5]
                            + row2[0] * kptr[6] + row2[1] * kptr[7] + row2[2] * kptr[8];
                *outptr++ = sum;
                // horizontal stride of 2
                row0 += 2;
                row1 += 2;
                row2 += 2;
            }
            // vertical stride of 2: advance to the next row window
            row0 += tailstep;
            row1 += tailstep;
            row2 += tailstep;
        }
    }
}
|
data_partition.h | #ifndef LIGHTGBM_DATA_PARTITION_H_
#define LIGHTGBM_DATA_PARTITION_H_
#include <LightGBM/meta.h>
#include <LightGBM/dataset.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <cstring>
#include <vector>
namespace LightGBM {
/*!
* \brief DataPartition is used to store the the partition of data on tree.
*/
class DataPartition {
 public:
  // Construct a partition over num_data rows and up to num_leaves leaves.
  DataPartition(data_size_t num_data, int num_leaves)
    :num_data_(num_data), num_leaves_(num_leaves) {
    leaf_begin_.resize(num_leaves_);
    leaf_count_.resize(num_leaves_);
    indices_.resize(num_data_);
    temp_left_indices_.resize(num_data_);
    temp_right_indices_.resize(num_data_);
    used_data_indices_ = nullptr;
    // query the OpenMP thread count once so the per-thread scratch
    // buffers below can be sized up front
    #pragma omp parallel
    #pragma omp master
    {
      num_threads_ = omp_get_num_threads();
    }
    offsets_buf_.resize(num_threads_);
    left_cnts_buf_.resize(num_threads_);
    right_cnts_buf_.resize(num_threads_);
    left_write_pos_buf_.resize(num_threads_);
    right_write_pos_buf_.resize(num_threads_);
  }
  // Resize leaf metadata for a tree with num_leaves leaves.
  void ResetLeaves(int num_leaves) {
    num_leaves_ = num_leaves;
    leaf_begin_.resize(num_leaves_);
    leaf_count_.resize(num_leaves_);
  }
  // Resize the index buffers for a new number of data rows.
  void ResetNumData(int num_data) {
    num_data_ = num_data;
    indices_.resize(num_data_);
    temp_left_indices_.resize(num_data_);
    temp_right_indices_.resize(num_data_);
  }
  ~DataPartition() {
  }
  /*!
  * \brief Init, will put all data on the root(leaf_idx = 0)
  */
  void Init() {
    std::fill(leaf_begin_.begin(), leaf_begin_.end(), 0);
    std::fill(leaf_count_.begin(), leaf_count_.end(), 0);
    if (used_data_indices_ == nullptr) {
      // if using all data
      leaf_count_[0] = num_data_;
      #pragma omp parallel for schedule(static)
      for (data_size_t i = 0; i < num_data_; ++i) {
        indices_[i] = i;
      }
    } else {
      // if bagging
      leaf_count_[0] = used_data_count_;
      std::memcpy(indices_.data(), used_data_indices_, used_data_count_ * sizeof(data_size_t));
    }
  }
  /*!
  * \brief Rebuild the partition from a per-row leaf assignment
  * \param leaf_pred leaf_pred[i] is the leaf row i belongs to
  * \param num_leaves number of leaves in the new tree
  */
  void ResetByLeafPred(const std::vector<int>& leaf_pred, int num_leaves) {
    ResetLeaves(num_leaves);
    // bucket row indices by leaf, then lay the buckets out contiguously
    std::vector<std::vector<data_size_t>> indices_per_leaf(num_leaves_);
    for (data_size_t i = 0; i < static_cast<data_size_t>(leaf_pred.size()); ++i) {
      indices_per_leaf[leaf_pred[i]].push_back(i);
    }
    data_size_t offset = 0;
    for (int i = 0; i < num_leaves_; ++i) {
      leaf_begin_[i] = offset;
      leaf_count_[i] = static_cast<data_size_t>(indices_per_leaf[i].size());
      std::copy(indices_per_leaf[i].begin(), indices_per_leaf[i].end(), indices_.begin() + leaf_begin_[i]);
      offset += leaf_count_[i];
    }
  }
  /*!
  * \brief Get the data indices of one leaf
  * \param leaf index of leaf
  * \param out_len output: number of data on this leaf
  * \return pointer into indices_ for this leaf's rows
  */
  const data_size_t* GetIndexOnLeaf(int leaf, data_size_t* out_len) const {
    // copy reference, maybe unsafe, but faster
    data_size_t begin = leaf_begin_[leaf];
    *out_len = leaf_count_[leaf];
    return indices_.data() + begin;
  }
  /*!
  * \brief Split one leaf's rows into left (kept in 'leaf') and right
  *        ('right_leaf') partitions, in parallel
  * \param leaf index of leaf being split
  * \param dataset dataset that evaluates the split condition per row
  * \param feature feature used by the split
  * \param threshold split threshold(s)
  * \param num_threshold number of thresholds
  * \param default_left where rows with missing values go
  * \param right_leaf index of the new right leaf
  */
  void Split(int leaf, const Dataset* dataset, int feature, const uint32_t* threshold, int num_threshold, bool default_left, int right_leaf) {
    // lower bound on the per-thread chunk size, avoids tiny chunks
    const data_size_t min_inner_size = 512;
    // get leaf boundary
    const data_size_t begin = leaf_begin_[leaf];
    const data_size_t cnt = leaf_count_[leaf];
    data_size_t inner_size = (cnt + num_threads_ - 1) / num_threads_;
    if (inner_size < min_inner_size) { inner_size = min_inner_size; }
    // phase 1: each thread partitions its chunk into temp_left/temp_right
    // split data multi-threading
    OMP_INIT_EX();
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < num_threads_; ++i) {
      OMP_LOOP_EX_BEGIN();
      left_cnts_buf_[i] = 0;
      right_cnts_buf_[i] = 0;
      data_size_t cur_start = i * inner_size;
      // this thread's chunk starts past the leaf: nothing to do
      if (cur_start > cnt) { continue; }
      data_size_t cur_cnt = inner_size;
      if (cur_start + cur_cnt > cnt) { cur_cnt = cnt - cur_start; }
      // split data inner, reduce the times of function called
      data_size_t cur_left_count = dataset->Split(feature, threshold, num_threshold, default_left, indices_.data() + begin + cur_start, cur_cnt,
                                                  temp_left_indices_.data() + cur_start, temp_right_indices_.data() + cur_start);
      offsets_buf_[i] = cur_start;
      left_cnts_buf_[i] = cur_left_count;
      right_cnts_buf_[i] = cur_cnt - cur_left_count;
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    // phase 2: prefix-sum the per-thread counts into write positions
    data_size_t left_cnt = 0;
    left_write_pos_buf_[0] = 0;
    right_write_pos_buf_[0] = 0;
    for (int i = 1; i < num_threads_; ++i) {
      left_write_pos_buf_[i] = left_write_pos_buf_[i - 1] + left_cnts_buf_[i - 1];
      right_write_pos_buf_[i] = right_write_pos_buf_[i - 1] + right_cnts_buf_[i - 1];
    }
    left_cnt = left_write_pos_buf_[num_threads_ - 1] + left_cnts_buf_[num_threads_ - 1];
    // phase 3: copy both halves back into indices_ (left block first,
    // then the right block starting at begin + left_cnt)
    // copy back indices of right leaf to indices_
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < num_threads_; ++i) {
      if (left_cnts_buf_[i] > 0) {
        std::memcpy(indices_.data() + begin + left_write_pos_buf_[i],
                    temp_left_indices_.data() + offsets_buf_[i], left_cnts_buf_[i] * sizeof(data_size_t));
      }
      if (right_cnts_buf_[i] > 0) {
        std::memcpy(indices_.data() + begin + left_cnt + right_write_pos_buf_[i],
                    temp_right_indices_.data() + offsets_buf_[i], right_cnts_buf_[i] * sizeof(data_size_t));
      }
    }
    // update leaf boundary
    leaf_count_[leaf] = left_cnt;
    leaf_begin_[right_leaf] = left_cnt + begin;
    leaf_count_[right_leaf] = cnt - left_cnt;
  }
  /*!
  * \brief Set used data indices before training, used for bagging
  * \param used_data_indices indices of used data
  * \param num_used_data number of used data
  */
  void SetUsedDataIndices(const data_size_t* used_data_indices, data_size_t num_used_data) {
    used_data_indices_ = used_data_indices;
    used_data_count_ = num_used_data;
  }
  /*!
  * \brief Get number of data on one leaf
  * \param leaf index of leaf
  * \return number of data of this leaf
  */
  data_size_t leaf_count(int leaf) const { return leaf_count_[leaf]; }
  /*!
  * \brief Get leaf begin
  * \param leaf index of leaf
  * \return begin index of this leaf
  */
  data_size_t leaf_begin(int leaf) const { return leaf_begin_[leaf]; }
  const data_size_t* indices() const { return indices_.data(); }
  /*! \brief Get number of leaves */
  int num_leaves() const { return num_leaves_; }
 private:
  /*! \brief Number of all data */
  data_size_t num_data_;
  /*! \brief Number of all leaves */
  int num_leaves_;
  /*! \brief start index of data on one leaf */
  std::vector<data_size_t> leaf_begin_;
  /*! \brief number of data on one leaf */
  std::vector<data_size_t> leaf_count_;
  /*! \brief Store all data's indices, order by leaf[data_in_leaf0,..,data_leaf1,..] */
  std::vector<data_size_t> indices_;
  /*! \brief temp indices buffer for split */
  std::vector<data_size_t> temp_left_indices_;
  /*! \brief temp indices buffer for split */
  std::vector<data_size_t> temp_right_indices_;
  /*! \brief used data indices, used for bagging */
  const data_size_t* used_data_indices_;
  /*! \brief used data count, used for bagging */
  data_size_t used_data_count_;
  /*! \brief number of threads */
  int num_threads_;
  /*! \brief Buffer for multi-threading data partition, used to store offset for different threads */
  std::vector<data_size_t> offsets_buf_;
  /*! \brief Buffer for multi-threading data partition, used to store left count after split for different threads */
  std::vector<data_size_t> left_cnts_buf_;
  /*! \brief Buffer for multi-threading data partition, used to store right count after split for different threads */
  std::vector<data_size_t> right_cnts_buf_;
  /*! \brief Buffer for multi-threading data partition, used to store write position of left leaf for different threads */
  std::vector<data_size_t> left_write_pos_buf_;
  /*! \brief Buffer for multi-threading data partition, used to store write position of right leaf for different threads */
  std::vector<data_size_t> right_write_pos_buf_;
};
} // namespace LightGBM
#endif // LightGBM_DATA_PARTITION_H_
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/fourier.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
typedef struct _FourierInfo
{
  PixelChannel
    channel;   /* pixel channel this transform operates on */
  MagickBooleanType
    modulus;   /* MagickTrue => magnitude/phase pair, else real/imaginary */
  size_t
    width,     /* transform dimensions in pixels */
    height;
  ssize_t
    center;    /* center column offset -- presumably width/2+1 as in
                  ForwardQuadrantSwap(); confirm against its initializer */
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,const ComplexOperator op,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
  ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"
  CacheView
    *Ai_view,
    *Ar_view,
    *Bi_view,
    *Br_view,
    *Ci_view,
    *Cr_view;
  const char
    *artifact;
  const Image
    *Ai_image,
    *Ar_image,
    *Bi_image,
    *Br_image;
  double
    snr;
  Image
    *Ci_image,
    *complex_images,
    *Cr_image,
    *image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* complex arithmetic needs at least a (real, imaginary) image pair */
  if (images->next == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Result C = (Cr, Ci): a two-image list cloned from the input geometry.
  */
  image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  complex_images=NewImageList();
  AppendImageToList(&complex_images,image);
  image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
  if (image == (Image *) NULL)
    {
      complex_images=DestroyImageList(complex_images);
      return(complex_images);
    }
  AppendImageToList(&complex_images,image);
  /*
    Apply complex mathematics to image pixels.
  */
  /* optional divisor regularization for DivideComplexOperator */
  artifact=GetImageArtifact(image,"complex:snr");
  snr=0.0;
  if (artifact != (const char *) NULL)
    snr=StringToDouble(artifact,(char **) NULL);
  /* operand A = (images[0], images[1]); B defaults to the same pair, but
     with a 4+ image sequence B = (images[2], images[3]) */
  Ar_image=images;
  Ai_image=images->next;
  Br_image=images;
  Bi_image=images->next;
  if ((images->next->next != (Image *) NULL) &&
      (images->next->next->next != (Image *) NULL))
    {
      Br_image=images->next->next;
      Bi_image=images->next->next->next;
    }
  Cr_image=complex_images;
  Ci_image=complex_images->next;
  Ar_view=AcquireVirtualCacheView(Ar_image,exception);
  Ai_view=AcquireVirtualCacheView(Ai_image,exception);
  Br_view=AcquireVirtualCacheView(Br_image,exception);
  Bi_view=AcquireVirtualCacheView(Bi_image,exception);
  Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
  Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
  status=MagickTrue;
  progress=0;
  /* process rows in parallel; any row failure flips 'status' and the
     remaining iterations become no-ops */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(images,complex_images,images->rows,1L)
#endif
  for (y=0; y < (ssize_t) images->rows; y++)
  {
    register const Quantum
      *magick_restrict Ai,
      *magick_restrict Ar,
      *magick_restrict Bi,
      *magick_restrict Br;
    register Quantum
      *magick_restrict Ci,
      *magick_restrict Cr;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Ar_image->columns,1,exception);
    Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Ai_image->columns,1,exception);
    Br=GetCacheViewVirtualPixels(Br_view,0,y,Br_image->columns,1,exception);
    Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Bi_image->columns,1,exception);
    Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception);
    Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception);
    if ((Ar == (const Quantum *) NULL) || (Ai == (const Quantum *) NULL) ||
        (Br == (const Quantum *) NULL) || (Bi == (const Quantum *) NULL) ||
        (Cr == (Quantum *) NULL) || (Ci == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) images->columns; x++)
    {
      register ssize_t
        i;
      /* apply the operator to every pixel channel independently */
      for (i=0; i < (ssize_t) GetPixelChannels(images); i++)
      {
        switch (op)
        {
          case AddComplexOperator:
          {
            Cr[i]=Ar[i]+Br[i];
            Ci[i]=Ai[i]+Bi[i];
            break;
          }
          case ConjugateComplexOperator:
          default:
          {
            /* NOTE(review): negates Bi, which equals Ai for a 2-image
               sequence; with 4 images this conjugates operand B while
               keeping A's real part -- confirm that is intended */
            Cr[i]=Ar[i];
            Ci[i]=(-Bi[i]);
            break;
          }
          case DivideComplexOperator:
          {
            double
              gamma;
            /* gamma = 1/(|B|^2 + snr), guarded against division by zero */
            gamma=PerceptibleReciprocal(Br[i]*Br[i]+Bi[i]*Bi[i]+snr);
            Cr[i]=gamma*(Ar[i]*Br[i]+Ai[i]*Bi[i]);
            Ci[i]=gamma*(Ai[i]*Br[i]-Ar[i]*Bi[i]);
            break;
          }
          case MagnitudePhaseComplexOperator:
          {
            /* phase is remapped from [-pi,pi] to [0,1] */
            Cr[i]=sqrt(Ar[i]*Ar[i]+Ai[i]*Ai[i]);
            Ci[i]=atan2(Ai[i],Ar[i])/(2.0*MagickPI)+0.5;
            break;
          }
          case MultiplyComplexOperator:
          {
            Cr[i]=QuantumScale*(Ar[i]*Br[i]-Ai[i]*Bi[i]);
            Ci[i]=QuantumScale*(Ai[i]*Br[i]+Ar[i]*Bi[i]);
            break;
          }
          case RealImaginaryComplexOperator:
          {
            /* inverse of MagnitudePhase: A=(magnitude, normalized phase) */
            Cr[i]=Ar[i]*cos(2.0*MagickPI*(Ai[i]-0.5));
            Ci[i]=Ar[i]*sin(2.0*MagickPI*(Ai[i]-0.5));
            break;
          }
          case SubtractComplexOperator:
          {
            Cr[i]=Ar[i]-Br[i];
            Ci[i]=Ai[i]-Bi[i];
            break;
          }
        }
      }
      Ar+=GetPixelChannels(Ar_image);
      Ai+=GetPixelChannels(Ai_image);
      Br+=GetPixelChannels(Br_image);
      Bi+=GetPixelChannels(Bi_image);
      Cr+=GetPixelChannels(Cr_image);
      Ci+=GetPixelChannels(Ci_image);
    }
    if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* serialize progress updates across the row threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ComplexImages)
#endif
        proceed=SetImageProgress(images,ComplexImageTag,progress++,
          images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwadFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return as transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *source_pixels;
  MemoryInfo
    *source_info;
  register ssize_t
    i,
    x;
  ssize_t
    u,
    v,
    y;
  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
    Cyclically shifts roll_pixels in place by (x_offset,y_offset); the
    single-conditional wrap below assumes |x_offset| < width and
    |y_offset| < height.
  */
  source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    /* v = (y+y_offset) wrapped once into [0,height) */
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
    else
      v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
        y+y_offset;
    for (x=0L; x < (ssize_t) width; x++)
    {
      /* u = (x+x_offset) wrapped once into [0,width) */
      if (x_offset < 0L)
        u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
      else
        u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
          x+x_offset;
      source_pixels[v*width+u]=roll_pixels[i++];
    }
  }
  /* copy the shifted buffer back over the input in one pass */
  (void) memcpy(roll_pixels,source_pixels,height*width*
    sizeof(*source_pixels));
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;
  register ssize_t
    x;
  ssize_t
    center,
    y;
  /*
    Swap quadrants.
    source_pixels holds 'center' = width/2+1 samples per row; the full
    width x height, center-origin layout is built in forward_pixels by
    copying the stored half and mirroring it by point reflection for the
    other half.  NOTE(review): the mirroring relies on the conjugate
    symmetry of a real-to-complex transform -- confirm against the caller.
  */
  center=(ssize_t) (width/2L)+1L;
  /* roll rows so the DC row lands at height/2 */
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  /* right half of the output: direct copy */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  /* left half (rows 1..height-1): point-reflected copy */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  /* row 0 has no partner row; mirror within the row itself */
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
  return(MagickTrue);
}
/*
  CorrectPhaseLHS() negates the left half (the first width/2 columns) of
  every row of the width x height array fourier_pixels.
*/
static void CorrectPhaseLHS(const size_t width,const size_t height,
double *fourier_pixels)
{
ssize_t
row;

for (row=0; row < (ssize_t) height; row++)
{
double
*left;

ssize_t
column;

/* Point at the start of this row; only its left half is touched. */
left=fourier_pixels+row*width;
for (column=0; column < (ssize_t) (width/2); column++)
left[column]=(-left[column]);
}
}
/*
  ForwardFourier() scatters the full-plane magnitude/phase (or
  real/imaginary) arrays into the two output images of the "Fourier
  Transform" list: the first image receives the magnitude plane, the
  second the phase plane, each written into the selected channel.
  Returns MagickFalse on a missing phase image or allocation failure.
*/
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*magnitude_pixels,
*phase_pixels;
Image
*magnitude_image,
*phase_image;
MagickBooleanType
status;
MemoryInfo
*magnitude_info,
*phase_info;
register Quantum
*q;
register ssize_t
x;
ssize_t
i,
y;
/* The caller passes a two-image list: magnitude first, then phase. */
magnitude_image=GetFirstImageInList(image);
phase_image=GetNextImageInList(image);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",image->filename);
return(MagickFalse);
}
/*
Create "Fourier Transform" image from constituent arrays.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*phase_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL))
{
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
(void) memset(magnitude_pixels,0,fourier_info->width*
fourier_info->height*sizeof(*magnitude_pixels));
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
(void) memset(phase_pixels,0,fourier_info->width*
fourier_info->height*sizeof(*phase_pixels));
/* Expand the FFT half-planes into full planes with DC at the center. */
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude,magnitude_pixels);
if (status != MagickFalse)
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
phase_pixels);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (fourier_info->modulus != MagickFalse)
{
/* Map phase from [-pi,pi] into [0,1] so it can be stored as intensity. */
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]/=(2.0*MagickPI);
phase_pixels[i]+=0.5;
i++;
}
}
/* Write the magnitude plane into the selected channel of image #1. */
magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
SetPixelRed(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
case GreenPixelChannel:
{
SetPixelGreen(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
case BluePixelChannel:
{
SetPixelBlue(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
case BlackPixelChannel:
{
SetPixelBlack(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
case AlphaPixelChannel:
{
SetPixelAlpha(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
}
i++;
q+=GetPixelChannels(magnitude_image);
}
status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
if (status == MagickFalse)
break;
}
magnitude_view=DestroyCacheView(magnitude_view);
/* Write the phase plane into the selected channel of image #2. */
i=0L;
phase_view=AcquireAuthenticCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
SetPixelRed(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
case GreenPixelChannel:
{
SetPixelGreen(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
case BluePixelChannel:
{
SetPixelBlue(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
case BlackPixelChannel:
{
SetPixelBlack(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
case AlphaPixelChannel:
{
SetPixelAlpha(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
}
i++;
q+=GetPixelChannels(phase_image);
}
status=SyncCacheViewAuthenticPixels(phase_view,exception);
if (status == MagickFalse)
break;
}
phase_view=DestroyCacheView(phase_view);
phase_info=RelinquishVirtualMemory(phase_info);
magnitude_info=RelinquishVirtualMemory(magnitude_info);
return(status);
}
/*
  ForwardFourierTransform() reads one channel of `image` into a real
  array, runs FFTW's real-to-complex 2-D transform on it, optionally
  normalizes the result (default, or when the "fourier:normalize"
  artifact is "forward"), and splits the complex half-plane into
  magnitude/phase or real/imaginary arrays per fourier_info->modulus.
*/
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
const Image *image,double *magnitude_pixels,double *phase_pixels,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*value;
double
*source_pixels;
fftw_complex
*forward_pixels;
fftw_plan
fftw_r2c_plan;
MemoryInfo
*forward_info,
*source_info;
register const Quantum
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Generate the forward Fourier transform.
*/
source_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
/* Zero-fill so padding added for odd/non-square images is defined. */
memset(source_pixels,0,fourier_info->width*fourier_info->height*
sizeof(*source_pixels));
i=0L;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
/* Normalize the selected channel to [0,1] for the transform. */
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
source_pixels[i]=QuantumScale*GetPixelRed(image,p);
break;
}
case GreenPixelChannel:
{
source_pixels[i]=QuantumScale*GetPixelGreen(image,p);
break;
}
case BluePixelChannel:
{
source_pixels[i]=QuantumScale*GetPixelBlue(image,p);
break;
}
case BlackPixelChannel:
{
source_pixels[i]=QuantumScale*GetPixelBlack(image,p);
break;
}
case AlphaPixelChannel:
{
source_pixels[i]=QuantumScale*GetPixelAlpha(image,p);
break;
}
}
i++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/* r2c output is a half-plane: only height/2+1 complex rows are stored. */
forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
(fourier_info->height/2+1)*sizeof(*forward_pixels));
if (forward_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
return(MagickFalse);
}
forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
/* The FFTW planner is not thread-safe; serialize plan creation. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
source_pixels,forward_pixels,FFTW_ESTIMATE);
fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
fftw_destroy_plan(fftw_r2c_plan);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
value=GetImageArtifact(image,"fourier:normalize");
if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
{
double
gamma;
/*
Normalize fourier transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
forward_pixels[i]*=gamma;
#else
forward_pixels[i][0]*=gamma;
forward_pixels[i][1]*=gamma;
#endif
i++;
}
}
/*
Generate magnitude and phase (or real and imaginary).
*/
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
magnitude_pixels[i]=cabs(forward_pixels[i]);
phase_pixels[i]=carg(forward_pixels[i]);
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
magnitude_pixels[i]=creal(forward_pixels[i]);
phase_pixels[i]=cimag(forward_pixels[i]);
i++;
}
forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
return(MagickTrue);
}
/*
  ForwardFourierTransformChannel() computes the forward DFT of one pixel
  channel of `image` and writes the resulting magnitude/phase (or
  real/imaginary) planes into the two-image list fourier_image.  Returns
  MagickFalse on allocation or transform failure.

  Bug fix: the error-cleanup path previously tested
  `magnitude_info == NULL` before relinquishing it, which both called
  RelinquishVirtualMemory() on a NULL pointer and leaked the buffer when
  it HAD been allocated.  The test is now `!=`, matching the identical
  cleanup in ForwardFourier().
*/
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
const PixelChannel channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
double
*magnitude_pixels,
*phase_pixels;
FourierInfo
fourier_info;
MagickBooleanType
status;
MemoryInfo
*magnitude_info,
*phase_info;
/*
  Pad odd or non-square images up to the smallest even square; the FFT
  code assumes width == height with even extents.
*/
fourier_info.width=image->columns;
fourier_info.height=image->rows;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
size_t extent=image->columns < image->rows ? image->rows : image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
/* Columns stored per row in FFTW's r2c half-plane layout. */
fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
/* Half-plane buffers: the r2c transform stores height/2+1 rows. */
magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*phase_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL))
{
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
phase_pixels,exception);
if (status != MagickFalse)
status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
phase_pixels,exception);
phase_info=RelinquishVirtualMemory(phase_info);
magnitude_info=RelinquishVirtualMemory(magnitude_info);
return(status);
}
#endif
/*
  ForwardFourierTransformImage() returns a two-image list holding the
  forward DFT of `image` (magnitude then phase, or real then imaginary
  per `modulus`).  Without FFTW support it warns and returns an empty
  list.  Channels are transformed concurrently via OpenMP sections.
*/
MagickExport Image *ForwardFourierTransformImage(const Image *image,
const MagickBooleanType modulus,ExceptionInfo *exception)
{
Image
*fourier_image;
fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
image->filename);
#else
{
Image
*magnitude_image;
size_t
height,
width;
width=image->columns;
height=image->rows;
/* Pad odd or non-square images up to the smallest even square. */
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
size_t extent=image->columns < image->rows ? image->rows :
image->columns;
width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
height=width;
magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
if (magnitude_image != (Image *) NULL)
{
Image
*phase_image;
magnitude_image->storage_class=DirectClass;
magnitude_image->depth=32UL;
phase_image=CloneImage(image,width,height,MagickTrue,exception);
if (phase_image == (Image *) NULL)
magnitude_image=DestroyImage(magnitude_image);
else
{
MagickBooleanType
is_gray,
status;
phase_image->storage_class=DirectClass;
phase_image->depth=32UL;
AppendImageToList(&fourier_image,magnitude_image);
AppendImageToList(&fourier_image,phase_image);
status=MagickTrue;
is_gray=IsImageGray(image);
/* Transform each channel in its own OpenMP section; a failure in
   any section clears `status`. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
/* Gray images need only one channel transformed. */
if (is_gray != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GrayPixelChannel,modulus,fourier_image,exception);
else
thread_status=ForwardFourierTransformChannel(image,
RedPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GreenPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
BluePixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
/* Black channel only exists for CMYK images. */
if (image->colorspace == CMYKColorspace)
thread_status=ForwardFourierTransformChannel(image,
BlackPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->alpha_trait != UndefinedPixelTrait)
thread_status=ForwardFourierTransformChannel(image,
AlphaPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImageList(fourier_image);
fftw_cleanup();
}
}
}
#endif
return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  InverseQuadrantSwap() is the inverse of ForwardQuadrantSwap(): it folds
  the full width x height plane in `source` back into the half-plane
  layout (height rows of center = width/2+1 columns) expected by FFTW's
  complex-to-real transform, then rolls DC back toward the origin.
*/
static MagickBooleanType InverseQuadrantSwap(const size_t width,
const size_t height,const double *source,double *destination)
{
register ssize_t
x;
ssize_t
center,
y;
/*
Swap quadrants.
*/
center=(ssize_t) (width/2L)+1L;
/* Fold the mirrored full-plane samples back onto the stored half. */
for (y=1L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L+1L); x++)
destination[(height-y)*center-x+width/2L]=source[y*width+x];
/* Column 0 of each half-plane row comes from the plane's middle column. */
for (y=0L; y < (ssize_t) height; y++)
destination[y*center]=source[y*width+width/2L];
/* Row 0 is reflected onto itself. */
for (x=0L; x < center; x++)
destination[x]=source[center-x-1L];
/* Undo the vertical roll applied in the forward direction. */
return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
/*
  InverseFourier() reads the selected channel from the magnitude and
  phase (or real and imaginary) images, undoes the forward quadrant
  swap and phase scaling, and recombines the two planes into the complex
  half-plane array fourier_pixels expected by FFTW's c2r transform.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
const Image *magnitude_image,const Image *phase_image,
fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*inverse_pixels,
*magnitude_pixels,
*phase_pixels;
MagickBooleanType
status;
MemoryInfo
*inverse_info,
*magnitude_info,
*phase_info;
register const Quantum
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Inverse fourier - read image and break down into a double array.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*phase_pixels));
inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
(fourier_info->height/2+1)*sizeof(*inverse_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL) ||
(inverse_info == (MemoryInfo *) NULL))
{
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (inverse_info != (MemoryInfo *) NULL)
inverse_info=RelinquishVirtualMemory(inverse_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
/* Read the magnitude (or real) plane from the first input image. */
i=0L;
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
magnitude_pixels[i]=QuantumScale*GetPixelRed(magnitude_image,p);
break;
}
case GreenPixelChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelGreen(magnitude_image,p);
break;
}
case BluePixelChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelBlue(magnitude_image,p);
break;
}
case BlackPixelChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelBlack(magnitude_image,p);
break;
}
case AlphaPixelChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelAlpha(magnitude_image,p);
break;
}
}
i++;
p+=GetPixelChannels(magnitude_image);
}
}
magnitude_view=DestroyCacheView(magnitude_view);
/* Fold the full magnitude plane back into FFTW's half-plane layout. */
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude_pixels,inverse_pixels);
(void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*magnitude_pixels));
/* Read the phase (or imaginary) plane from the second input image. */
i=0L;
phase_view=AcquireVirtualCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
phase_pixels[i]=QuantumScale*GetPixelRed(phase_image,p);
break;
}
case GreenPixelChannel:
{
phase_pixels[i]=QuantumScale*GetPixelGreen(phase_image,p);
break;
}
case BluePixelChannel:
{
phase_pixels[i]=QuantumScale*GetPixelBlue(phase_image,p);
break;
}
case BlackPixelChannel:
{
phase_pixels[i]=QuantumScale*GetPixelBlack(phase_image,p);
break;
}
case AlphaPixelChannel:
{
phase_pixels[i]=QuantumScale*GetPixelAlpha(phase_image,p);
break;
}
}
i++;
p+=GetPixelChannels(phase_image);
}
}
if (fourier_info->modulus != MagickFalse)
{
/* Undo the forward mapping of phase from [-pi,pi] into [0,1]. */
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]-=0.5;
phase_pixels[i]*=(2.0*MagickPI);
i++;
}
}
phase_view=DestroyCacheView(phase_view);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (status != MagickFalse)
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
phase_pixels,inverse_pixels);
(void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*phase_pixels));
inverse_info=RelinquishVirtualMemory(inverse_info);
/*
Merge two sets.
*/
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
/* Polar to rectangular: re = m*cos(p), im = m*sin(p). */
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
magnitude_pixels[i]*sin(phase_pixels[i]);
#else
fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
fourier_pixels[i][0]=magnitude_pixels[i];
fourier_pixels[i][1]=phase_pixels[i];
#endif
i++;
}
magnitude_info=RelinquishVirtualMemory(magnitude_info);
phase_info=RelinquishVirtualMemory(phase_info);
return(status);
}
/*
  InverseFourierTransform() runs FFTW's complex-to-real 2-D transform on
  the half-plane fourier_pixels and writes the resulting spatial-domain
  samples into the selected channel of `image`, clipping to the image's
  actual columns/rows (the transform may have been padded larger).

  Fix: the "fourier:normalize" artifact is now explicitly tested against
  NULL before LocaleCompare(), matching the forward path in
  ForwardFourierTransform() instead of relying on LocaleCompare()
  tolerating a NULL argument.  Behavior is unchanged when the artifact
  is set.
*/
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*value;
double
*source_pixels;
fftw_plan
fftw_c2r_plan;
MemoryInfo
*source_info;
register Quantum
*q;
register ssize_t
i,
x;
ssize_t
y;
source_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
value=GetImageArtifact(image,"fourier:normalize");
if ((value != (const char *) NULL) && (LocaleCompare(value,"inverse") == 0))
{
double
gamma;
/*
Normalize inverse transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]*=gamma;
#else
fourier_pixels[i][0]*=gamma;
fourier_pixels[i][1]*=gamma;
#endif
i++;
}
}
/* The FFTW planner is not thread-safe; serialize plan creation. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InverseFourierTransform)
#endif
fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
fourier_pixels,source_pixels,FFTW_ESTIMATE);
fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
fftw_destroy_plan(fftw_c2r_plan);
i=0L;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
/* The transform may be padded; skip rows/columns beyond the image. */
if (y >= (ssize_t) image->rows)
break;
q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
image->columns ? image->columns : fourier_info->width,1UL,exception);
if (q == (Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
if (x < (ssize_t) image->columns)
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
SetPixelRed(image,ClampToQuantum(QuantumRange*source_pixels[i]),q);
break;
}
case GreenPixelChannel:
{
SetPixelGreen(image,ClampToQuantum(QuantumRange*source_pixels[i]),
q);
break;
}
case BluePixelChannel:
{
SetPixelBlue(image,ClampToQuantum(QuantumRange*source_pixels[i]),
q);
break;
}
case BlackPixelChannel:
{
SetPixelBlack(image,ClampToQuantum(QuantumRange*source_pixels[i]),
q);
break;
}
case AlphaPixelChannel:
{
SetPixelAlpha(image,ClampToQuantum(QuantumRange*source_pixels[i]),
q);
break;
}
}
i++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
/*
  InverseFourierTransformChannel() reconstructs one pixel channel of
  fourier_image from a magnitude/phase (or real/imaginary) image pair:
  it sizes the (even, square, possibly padded) transform, allocates the
  complex half-plane buffer, merges the two input planes into it, and
  runs the inverse transform.
*/
static MagickBooleanType InverseFourierTransformChannel(
const Image *magnitude_image,const Image *phase_image,
const PixelChannel channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
fftw_complex
*inverse_pixels;
FourierInfo
fourier_info;
MagickBooleanType
status;
MemoryInfo
*inverse_info;
fourier_info.width=magnitude_image->columns;
fourier_info.height=magnitude_image->rows;
/* Pad odd or non-square inputs up to the smallest even square. */
if ((magnitude_image->columns != magnitude_image->rows) ||
((magnitude_image->columns % 2) != 0) ||
((magnitude_image->rows % 2) != 0))
{
size_t extent=magnitude_image->columns < magnitude_image->rows ?
magnitude_image->rows : magnitude_image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
/* Columns stored per row in FFTW's r2c/c2r half-plane layout. */
fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
inverse_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*inverse_pixels));
if (inverse_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
status=InverseFourier(&fourier_info,magnitude_image,phase_image,
inverse_pixels,exception);
if (status != MagickFalse)
status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
exception);
inverse_info=RelinquishVirtualMemory(inverse_info);
return(status);
}
#endif
/*
  InverseFourierTransformImage() reconstructs an image from a
  magnitude/phase (or real/imaginary, per `modulus`) image pair.
  Returns NULL and raises an exception/warning when the phase image is
  missing or FFTW support is not built in.  Channels are processed
  concurrently via OpenMP sections.
*/
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
const Image *phase_image,const MagickBooleanType modulus,
ExceptionInfo *exception)
{
Image
*fourier_image;
assert(magnitude_image != (Image *) NULL);
assert(magnitude_image->signature == MagickCoreSignature);
if (magnitude_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
magnitude_image->filename);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",magnitude_image->filename);
return((Image *) NULL);
}
#if !defined(MAGICKCORE_FFTW_DELEGATE)
fourier_image=(Image *) NULL;
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
magnitude_image->filename);
#else
{
/* The output takes the magnitude image's geometry and metadata. */
fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
magnitude_image->rows,MagickTrue,exception);
if (fourier_image != (Image *) NULL)
{
MagickBooleanType
is_gray,
status;
status=MagickTrue;
/* Only take the single-channel path if BOTH inputs are gray. */
is_gray=IsImageGray(magnitude_image);
if (is_gray != MagickFalse)
is_gray=IsImageGray(phase_image);
/* Invert each channel in its own OpenMP section; a failure in any
   section clears `status`. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GrayPixelChannel,modulus,fourier_image,exception);
else
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,RedPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GreenPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BluePixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
/* Black channel only exists for CMYK images. */
if (magnitude_image->colorspace == CMYKColorspace)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BlackPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->alpha_trait != UndefinedPixelTrait)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,AlphaPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImage(fourier_image);
}
fftw_cleanup();
}
#endif
return(fourier_image);
}
|
templatemath.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* templatemath.h
*
* Created on: Jan 1, 2016
* Author: agibsonccc
*/
#ifndef TEMPLATEMATH_H_
#define TEMPLATEMATH_H_
#include <system/dll.h>
#include <system/pointercast.h>
#include <math/platformmath.h>
#include <array/DataTypeUtils.h>
#define BFLOAT16_MAX_VALUE 32737.
#define HALF_MAX_VALUE 65504.
#define FLOAT_MAX_VALUE 3.4028235E38
#define DOUBLE_MAX_VALUE 1.7976931348623157E308
#define FLOAT_MIN_NORMAL 1.17549435e-38
#ifndef M_E
#define M_E 2.718281828459
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
namespace sd {
#ifdef __CUDACC__
#endif
namespace math {
/*
 * Forward declarations for the type-generic math helpers.  T/X/Y are the
 * input types and Z (where present) is the result type.  Definitions
 * follow later in this header or in platform-specific code.
 */
template<typename T>
math_def inline T nd4j_abs(T value);
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2);
template<typename T>
math_def inline T nd4j_max(T val1, T val2);
template<typename T>
math_def inline T nd4j_min(T val1, T val2);
template <typename T>
math_def inline bool nd4j_eq(T val1, T val2, double eps);
template<typename T, typename Z>
math_def inline Z nd4j_re(T val1, T val2);
template<typename T, typename Z>
math_def inline Z nd4j_rint(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_copysign(T val1, T val2);
template <typename T, typename Z>
math_def inline Z nd4j_softplus(T val);
/* Bit-rotation helpers. */
template <typename T>
math_def inline T nd4j_rotl(T val, T shift);
template <typename T>
math_def inline T nd4j_rotr(T val, T shift);
//#ifndef __CUDACC__
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length);
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_ceil(T val1);
/* Floating-point classification predicates. */
template<typename T>
math_def inline bool nd4j_isnan(T val1);
template<typename T>
math_def inline bool nd4j_isinf(T val1);
template<typename T>
math_def inline bool nd4j_isfin(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_cos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_cosh(T val);
template<typename X, typename Z>
math_def inline Z nd4j_exp(X val);
template<typename T, typename Z>
math_def inline Z nd4j_floor(T val);
template<typename X, typename Z>
math_def inline Z nd4j_log(X val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2);
template<typename T, typename Z>
math_def inline Z nd4j_round(T val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X num, Y denom);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X num, Y denom);
/* Error function and its complement. */
template<typename T, typename Z>
math_def inline Z nd4j_erf(T num);
template<typename T, typename Z>
math_def inline Z nd4j_erfc(T num);
math_def inline int32_t floatToRawIntBits(float d) {
// Reinterpret the IEEE-754 bit pattern of a float as a signed 32-bit
// integer (the equivalent of Java's Float.floatToRawIntBits).
union { float as_float; int32_t as_int; } pun;
pun.as_float = d;
return pun.as_int;
}
math_def inline float intBitsToFloat(int32_t i) {
// Reinterpret a signed 32-bit integer as the IEEE-754 float with the
// same bit pattern (the equivalent of Java's Float.intBitsToFloat).
union { float as_float; int32_t as_int; } pun;
pun.as_int = i;
return pun.as_float;
}
// Multiply the sign of y into x: returns x with its sign flipped when y is
// negative (a bit-level x * sign(y), as used by SLEEF-style kernels).
math_def inline float mulsignf(float x, float y) {
    // Build the sign-bit mask from an unsigned literal: the original
    // `1 << 31` left-shifts into the sign bit of a signed int, which is
    // undefined behavior in C/C++.
    const int32_t signMask = static_cast<int32_t>(1u << 31);
    return intBitsToFloat(floatToRawIntBits(x) ^ (floatToRawIntBits(y) & signMask));
}
// Bit-level copysign: returns |x| carrying the sign of y (handles NaN/inf
// payloads verbatim, unlike arithmetic formulations).
math_def inline float copysignfk(float x, float y) {
    // Use an unsigned literal for the sign mask; `1 << 31` on a signed int is
    // undefined behavior (left shift into the sign bit).
    const int32_t signMask = static_cast<int32_t>(1u << 31);
    return intBitsToFloat((floatToRawIntBits(x) & ~signMask) ^ (floatToRawIntBits(y) & signMask));
}
// Logistic sigmoid: 1 / (1 + e^-x), computed entirely in the output type Z.
template<typename T, typename Z>
math_def inline Z nd4j_sigmoid(T val) {
    const Z one = (Z) 1.0f;
    const Z expNeg = nd4j_exp<T, Z>(-val);
    return one / (one + expNeg);
}
// ELU activation: identity for x >= 0, alpha * (e^x - 1) otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_elu(T val, T alpha) {
if (val >= (T) 0.f)
return val;
return static_cast<Z>(alpha) * (nd4j_exp<T, Z>(val) - static_cast<Z>(1.0f));
}
// Leaky ReLU: alpha * x for negative x, identity otherwise.
// NOTE(review): the negative branch computes alpha * val in T, then converts
// to Z on return.
template<typename T, typename Z>
math_def inline Z nd4j_leakyrelu(T val,T alpha) {
if (val < (T) 0.0f)
return alpha * val;
else
return val;
}
// Derivative of ELU: 1 for x >= 0, alpha * e^x otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_eluderivative(T val, T alpha) {
if (val >= static_cast<T>(0.0f))
return static_cast<Z>(1.0f);
return static_cast<Z>(alpha) * nd4j_exp<T, Z>(val);
//return val >= 0.0 ? 1.0 : nd4j_exp(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_sin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sinh(T val);
// Softplus: log(1 + e^x), a smooth approximation of ReLU.
template<typename T, typename Z>
math_def inline Z nd4j_softplus(T val) {
return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val));
}
// Softsign: x / (1 + |x|); bounded in (-1, 1).
template<typename T, typename Z>
math_def inline Z nd4j_softsign(T val) {
return val / ((T) 1.0f + sd::math::nd4j_abs<T>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_sqrt(X val);
template<typename X, typename Z>
math_def inline Z nd4j_tanh(X val);
template<typename T, typename Z>
math_def inline Z nd4j_tan(T val);
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2);
// Two-argument arctangent: both operands are converted to Z before the call.
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2) {
return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2));
}
// Tangent, computed in the output type Z.
template<typename T, typename Z>
math_def inline Z nd4j_tan(T tval) {
return p_tan<Z>(static_cast<Z>(tval));
}
// Derivative of tanh: d/dx tanh(x) = 1 - tanh(x)^2.
template<typename T, typename Z>
math_def inline Z nd4j_tanhderivative(T val) {
    const Z th = nd4j_tanh<T,Z>(val);
    return (Z) 1.0f - th * th;
}
// Derivative of the sigmoid: s(x) * (1 - s(x)).
// NOTE(review): return type is declared T although the value is computed in Z
// (so the Z result is converted back to T on return) — likely intended to be Z;
// changing it would alter the template interface, so it is left as-is.
template <typename T, typename Z>
math_def inline T nd4j_sigmoidderivative(T val) {
Z sigmoid = nd4j_sigmoid<T,Z>(val);
return sigmoid * ((Z) 1.0f - sigmoid);
}
// Derivative of softsign: 1 / (1 + |x|)^2.
// NOTE(review): same T-vs-Z return-type mismatch as above.
template<typename T, typename Z>
math_def inline T nd4j_softsignderivative(T val) {
T y = (T) 1.0f + nd4j_abs(val);
return (Z) 1.0f / (y * y);
}
// Sign function: -1, 0 or +1 (computed in Z, converted to T on return).
template<typename T, typename Z>
math_def inline T nd4j_sgn(T val) {
return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f;
}
// Aliases for nd4j_sgn kept for API compatibility.
template<typename T, typename Z>
math_def inline Z nd4j_sign(T val) {
return nd4j_sgn<T, Z>(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_signum(T val) {
return nd4j_sgn<T, Z>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_gamma(X a);
template<typename X, typename Z>
math_def inline Z nd4j_lgamma(X x);
//#ifndef __CUDACC__
/*
template<>
math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
float16 dot = (float16) 0.0f;
// TODO: since we can't use simd on unions, we might use something else here.
for(int e = 0; e < length; e++) {
dot += x[e] * y[e];
}
return dot;
}
*/
// Inner product of two length-element buffers; each pair of elements is
// promoted to Z before multiply-accumulate.
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length) {
    Z acc = (Z)0.0f;
    for (int i = 0; i < length; ++i)
        acc += static_cast<Z>(x[i]) * static_cast<Z>(y[i]);
    return acc;
}
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_acos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sech(T val);
template<typename T, typename Z>
math_def inline Z nd4j_acosh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val);
// Inverse hyperbolic sine via the identity asinh(x) = log(sqrt(x^2 + 1) + x).
// NOTE(review): the pow is evaluated with T inputs and the log/sqrt in Z;
// precision therefore depends on both template parameters.
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val) {
//Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val);
}
template<typename T, typename Z>
math_def inline Z nd4j_atan(T val);
template<typename T, typename Z>
math_def inline Z nd4j_atanh(T val);
// nd4j_abs specializations.
// float16 uses the hardware negate when native halfs are available, otherwise
// falls back to fabsf on the widened float.
template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
if (value < (float16) 0.f) {
return float16(__hneg(value.data));
} else
return value;
#else
return (float16) fabsf((float) value);
#endif
}
template<>
math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) {
return (bfloat16) fabsf((float) value);
}
template<>
math_def inline float nd4j_abs<float>(float value) {
return fabsf(value);
}
template<>
math_def inline double nd4j_abs<double>(double value) {
return fabs(value);
}
template<>
math_def inline int nd4j_abs<int>(int value) {
return abs(value);
}
template<>
math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
return llabs(value);
}
// bool and unsigned types are their own absolute value.
template<>
math_def inline bool nd4j_abs<bool>(bool value) {
return value;
}
template<>
math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) {
return value;
}
template<>
math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) {
return value;
}
template<>
math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) {
return value;
}
template<>
math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) {
return value;
}
// Narrow signed types negate via int promotion.
// NOTE(review): abs of the most negative value (e.g. INT8_MIN) still wraps
// back to itself after truncation — same caveat as standard abs().
template<>
math_def inline int8_t nd4j_abs<int8_t>(int8_t value) {
return value < 0 ? -value : value;
}
template<>
math_def inline int16_t nd4j_abs<int16_t>(int16_t value) {
return value < 0 ? -value : value;
}
// nd4j_isnan specializations.
// NOTE(review): the float16 check compares against the single bit pattern
// 0x7fff; other NaN payload encodings would not be detected — confirm the
// library only ever produces this canonical NaN.
template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
return *(value.data.getXP()) == 0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) {
return value == bfloat16::nan(); //0x7fffU;
}
// IEEE trick: NaN is the only value not equal to itself.
template<>
math_def inline bool nd4j_isnan<float>(float value) {
return value != value;
}
template<>
math_def inline bool nd4j_isnan<double>(double value) {
return value != value;
}
// Integer and bool types cannot hold NaN.
template<>
math_def inline bool nd4j_isnan<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<int16_t>(int16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<int8_t>(int8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<bool>(bool value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) {
return false;
}
// nd4j_isinf specializations.
// Half/bfloat variants use an out-of-range comparison rather than a bit test,
// so any value beyond the finite maximum is classified as infinite.
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) {
return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE;
}
// float/double delegate to the platform isinf (device intrinsic under CUDA).
template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<double>(double value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE;
}
// Integer and bool types cannot hold infinity.
template<>
math_def inline bool nd4j_isinf<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<int16_t>(int16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<int8_t>(int8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<bool>(bool value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) {
return false;
}
// A value is finite iff it is neither NaN nor infinite.
template<typename T>
math_def inline bool nd4j_isfin(T value) {
    return !(nd4j_isnan<T>(value) || nd4j_isinf<T>(value));
}
// nd4j_copysign specializations: magnitude of val1, sign of val2.
// float16 widens to float and reuses the libc copysignf.
template<>
math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) {
return (float16) copysignf((float) val1, (float) val2);
}
template<>
math_def inline float nd4j_copysign<float>(float val1, float val2) {
return copysignf(val1, val2);
}
template<>
math_def inline double nd4j_copysign<double>(double val1, double val2) {
return copysign(val1, val2);
}
// Integer variants: |val1| with the sign of val2.
// NOTE(review): |INT_MIN| / |LLONG_MIN| overflows, as with standard abs().
template<>
math_def inline int nd4j_copysign<int>(int val1, int val2) {
if (val2 < 0) return -(nd4j_abs<int>(val1));
else return nd4j_abs<int>(val1);
}
template<>
math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) {
if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1));
else return nd4j_abs<Nd4jLong>(val1);
}
// Boolean maximum degenerates to logical OR.
template<>
math_def inline bool nd4j_max(bool val1, bool val2) {
    return val1 || val2;
}
// Generic maximum of two values.
template<typename T>
math_def inline T nd4j_max(T val1, T val2) {
    if (val1 > val2)
        return val1;
    return val2;
}
// Boolean minimum degenerates to logical AND.
template<>
math_def inline bool nd4j_min(bool val1, bool val2) {
    return val1 && val2;
}
// Generic minimum of two values.
template<typename T>
math_def inline T nd4j_min(T val1, T val2) {
    if (val1 < val2)
        return val1;
    return val2;
}
// Approximate equality: equal infinities match; otherwise the values match if
// their difference is within eps absolutely, or within eps relative to the
// larger magnitude (Knuth's approach, robust for large numbers).
template <typename T>
math_def inline bool nd4j_eq(T d1, T d2, double eps) {
// Two infinities are equal only when they share a sign.
if (sd::math::nd4j_isinf<T>(d1) && sd::math::nd4j_isinf<T>(d2)) {
if (d1 > 0 && d2 > 0)
return true;
else if (d1 < 0 && d2 < 0)
return true;
else
return false;
}
auto diff = static_cast<double>(sd::math::nd4j_abs<T>(d1 - d2));
// works well except in the range of very large numbers
if (diff <= eps)
return true;
// Knuth approach
// works well except in the range of very small numbers
if (diff <= sd::math::nd4j_max<double>(sd::math::nd4j_abs<double>(static_cast<double>(d1)), sd::math::nd4j_abs<double>(static_cast<double>(d2))) * eps)
return true;
return false;
}
// Thin wrappers over the p_* primitives.
// NOTE(review): some wrappers evaluate in the input type X (ceil, round, rint,
// exp, floor, log) and only convert the result to Z, while others promote the
// argument to Z first (asin, atan, ...). Precision therefore differs per
// function — presumed intentional, but confirm against the p_* definitions.
template <typename X, typename Z>
math_def inline Z nd4j_ceil(X val) {
return static_cast<Z>(p_ceil<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_round(X val) {
return static_cast<Z>(p_round<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_asin(X val) {
return p_asin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atan(X val) {
return p_atan<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atanh(X val) {
return p_atanh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cosh(X val) {
return p_cosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_rint(X val) {
return p_rint<X>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_sinh(X val) {
return p_sinh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_acos(X val) {
return p_acos<Z>(static_cast<Z>(val));
}
// Hyperbolic secant: 1 / cosh(x).
template <typename X, typename Z>
math_def inline Z nd4j_sech(X val) {
return static_cast<Z>(1) / nd4j_cosh<X,Z>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_acosh(X val) {
return p_acosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cos(X val) {
return p_cos<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_exp(X val) {
return p_exp<X>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_floor(X val) {
return static_cast<Z>(p_floor<X>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_log(X val) {
return static_cast<Z>(p_log<X>(val));
}
/**
* This func is special case - it must return floating point value, and optionally Y arg can be floating point argument
* @tparam X
* @tparam Y
* @tparam Z
* @param val
* @param val2
* @return
*/
// Fast path: all-float pow avoids the Z promotion of the generic template.
template <>
math_def inline float nd4j_pow(float val, float val2) {
return p_pow<float>(val, val2);
}
// Generic pow: both operands are promoted to the output type Z first.
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2) {
return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
/**
* LogGamma(a) - float point extension of ln(n!)
**/
// For x < 12 delegates to log(gamma(x)); above that uses the Abramowitz &
// Stegun 6.1.41 asymptotic series, which gamma() itself relies on for large
// arguments (so the two never recurse into each other for the same range).
template <typename X, typename Z>
math_def inline Z nd4j_lgamma(X x) {
// if (x <= X(0.0))
// {
// std::stringstream os;
// os << "Logarithm of Gamma has sence only for positive values, but " << x << " was given.";
// throw std::invalid_argument( os.str() );
// }
if (x < X(12.0)) {
return nd4j_log<Z,Z>(nd4j_gamma<X,Z>(x));
}
// Abramowitz and Stegun 6.1.41
// Asymptotic series should be good to at least 11 or 12 figures
// For error analysis, see Whittiker and Watson
// A Course in Modern Analysis (1927), page 252
static const double c[8] = {
1.0/12.0,
-1.0/360.0,
1.0/1260.0,
-1.0/1680.0,
1.0/1188.0,
-691.0/360360.0,
1.0/156.0,
-3617.0/122400.0
};
// Evaluate the series in 1/x^2 by Horner's scheme.
double z = Z(1.0 / Z(x * x));
double sum = c[7];
for (int i = 6; i >= 0; i--) {
sum *= z;
sum += c[i];
}
double series = sum / Z(x);
// 0.5 * log(2*pi)
static const double halfLogTwoPi = 0.91893853320467274178032973640562;
return Z((double(x) - 0.5) * nd4j_log<X,double>(x) - double(x) + halfLogTwoPi + series);
}
// Relative error of two values: |a - b| / (|a| + |b|), defined as 0 when both
// inputs are exactly zero.
template<typename T>
math_def inline T nd4j_re(T val1, T val2) {
    const T zero = (T) 0.0f;
    if (val1 == zero && val2 == zero)
        return zero;
    const T diff = nd4j_abs<T>(val1 - val2);
    const T scale = nd4j_abs<T>(val1) + nd4j_abs<T>(val2);
    return diff / scale;
}
// remainder/fmod/sin/sqrt: promote to Z and delegate to the p_* primitives.
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X val, Y val2) {
return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X val, Y val2) {
return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Z>
math_def inline Z nd4j_sin(X val) {
return p_sin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_sqrt(X val) {
return p_sqrt<Z>(static_cast<Z>(val));
}
// tanh via exponentials, split by sign for numeric stability:
// tanh(x) = (e^{2x} - 1) / (e^{2x} + 1), used for x <= 0.
template <typename X>
math_def inline X neg_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(2.0f);
X e = static_cast<X>(M_E);
auto p = sd::math::nd4j_pow<X, X, X>(e, val * t);
return (p - o)/ (p + o);
}
// tanh(x) = (1 - e^{-2x}) / (1 + e^{-2x}), used for x > 0.
template <typename X>
math_def inline X pos_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(-2.0f);
X e = static_cast<X>(M_E);
auto p = sd::math::nd4j_pow<X, X, X>(e, val * t);
return (o - p) / (o + p);
}
// float tanh helper: computes tanh(|x|) given sign = sign(x), always using the
// decaying-exponential form so e^{-2|x|} never overflows.
math_def inline float neu_tanh(float val, float sign) {
float e(M_E);
float av = sign * val;
auto p = sd::math::nd4j_pow<float, float, float>(e, -av * 2.f);
return (1 - p) / (1 + p);
}
// float tanh: tanh is odd, so compute tanh(|x|) and restore the sign
// (copysignfk keeps the sign of signed zero intact).
template <>
math_def inline float nd4j_tanh(float val) {
float sign = copysignfk(1.0f, val);
return sign * neu_tanh(val, sign);
}
// Generic tanh: pick the numerically stable exponential form per sign.
template <typename X, typename Z>
math_def inline Z nd4j_tanh(X val) {
return val <= 0 ? neg_tanh(val) : pos_tanh(val);
}
// Bit rotations and error functions — thin wrappers over primitives.
template <typename T>
math_def inline T nd4j_rotl(T val, T shift) {
return p_rotl<T>(val, shift);
}
template <typename T>
math_def inline T nd4j_rotr(T val, T shift) {
return p_rotr<T>(val, shift);
}
template <typename X, typename Z>
math_def inline Z nd4j_erf(X val) {
return p_erf<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_erfc(X val) {
return p_erfc<Z>(static_cast<Z>(val));
}
// In-place swap of two references.
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2) {
T temp = val1; val1=val2; val2=temp;
};
// Gamma function, evaluated piecewise over three intervals (classic
// rational-approximation scheme; see Abramowitz & Stegun / John D. Cook).
template <typename X, typename Z>
math_def inline Z nd4j_gamma(X a) {
// nd4j_lgamma<X,Z>(a);
// return (Z)std::tgamma(a);
// Split the function domain into three intervals:
// (0, 0.001), [0.001, 12), and (12, infinity)
///////////////////////////////////////////////////////////////////////////
// First interval: (0, 0.001)
//
// For small a, 1/Gamma(a) has power series a + gamma a^2 - ...
// So in this range, 1/Gamma(a) = a + gamma a^2 with error on the order of a^3.
// The relative error over this interval is less than 6e-7.
const double eulerGamma = 0.577215664901532860606512090; // Euler's gamma constant
if (a < X(0.001))
return Z(1.0 / ((double)a * (1.0 + eulerGamma * (double)a)));
///////////////////////////////////////////////////////////////////////////
// Second interval: [0.001, 12)
if (a < X(12.0)) {
// The algorithm directly approximates gamma over (1,2) and uses
// reduction identities to reduce other arguments to this interval.
double y = (double)a;
int n = 0;
bool argWasLessThanOne = y < 1.0;
// Add or subtract integers as necessary to bring y into (1,2)
// Will correct for this below
if (argWasLessThanOne) {
y += 1.0;
}
else {
n = static_cast<int>(floor(y)) - 1; // will use n later
y -= n;
}
// numerator coefficients for approximation over the interval (1,2)
static const double p[] = {
-1.71618513886549492533811E+0,
2.47656508055759199108314E+1,
-3.79804256470945635097577E+2,
6.29331155312818442661052E+2,
8.66966202790413211295064E+2,
-3.14512729688483675254357E+4,
-3.61444134186911729807069E+4,
6.64561438202405440627855E+4
};
// denominator coefficients for approximation over the interval (1,2)
static const double q[] = {
-3.08402300119738975254353E+1,
3.15350626979604161529144E+2,
-1.01515636749021914166146E+3,
-3.10777167157231109440444E+3,
2.25381184209801510330112E+4,
4.75584627752788110767815E+3,
-1.34659959864969306392456E+5,
-1.15132259675553483497211E+5
};
// Evaluate the rational approximation at z = y - 1 (Horner form).
double num = 0.0;
double den = 1.0;
double z = y - 1;
for (auto i = 0; i < 8; i++) {
num = (num + p[i]) * z;
den = den * z + q[i];
}
double result = num / den + 1.0;
// Apply correction if argument was not initially in (1,2)
if (argWasLessThanOne) {
// Use identity gamma(z) = gamma(z+1)/z
// The variable "result" now holds gamma of the original y + 1
// Thus we use y-1 to get back the orginal y.
result /= (y - 1.0);
}
else {
// Use the identity gamma(z+n) = z*(z+1)* ... *(z+n-1)*gamma(z)
for (auto i = 0; i < n; i++)
result *= y++;
}
return Z(result);
}
///////////////////////////////////////////////////////////////////////////
// Third interval: [12, infinity)
if (a > 171.624) {
// Correct answer too large to display. Force +infinity.
return Z(DOUBLE_MAX_VALUE);
// return DataTypeUtils::infOrMax<Z>();
}
// Large arguments: exp(lgamma(a)); lgamma uses the asymptotic series here.
return sd::math::nd4j_exp<Z,Z>(sd::math::nd4j_lgamma<X,Z>(a));
}
// Lower regularized incomplete gamma P(a, x) via its power-series expansion:
// P(a,x) = x^a e^-x / Gamma(a) * sum_i x^i / (a (a+1) ... (a+i)).
// NOTE(review): the loop terminates when 1/denom drops below 1e-12; for an
// integer Z, or for a/x combinations where denom grows slowly, termination
// relies on denom eventually overflowing the threshold — confirm callers only
// instantiate Z as a floating type.
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igamma(X a, Y x) {
Z aim = nd4j_pow<X, X, Z>(x, a) / (nd4j_exp<X, Z>(x) * nd4j_gamma<Y, Z>(a));
auto sum = Z(0.);
auto denom = Z(1.);
if (a <= X(0.000001))
//throw std::runtime_error("Cannot calculate gamma for a zero val.");
return Z(0);
for (int i = 0; Z(1./denom) > Z(1.0e-12); i++) {
denom *= (a + i);
sum += nd4j_pow<X, int, Z>(x, i) / denom;
}
return aim * sum;
}
// Upper regularized incomplete gamma Q(a, x) = 1 - P(a, x).
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igammac(X a, Y x) {
return Z(1.) - nd4j_igamma<X, Y, Z>(a, x);
}
#ifdef __CUDACC__
// Device-side atomic helpers. All return the OLD value at *address, matching
// the CUDA atomic* convention.
namespace atomics {
template <typename T>
inline __device__ T nd4j_atomicAdd(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicSub(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMul(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicDiv(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMin(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMax(T* address, T val);
// 32-bit integer min maps directly onto the hardware atomicMin.
template <>
inline __device__ int32_t nd4j_atomicMin<int32_t>(int32_t* address, int32_t val) {
return atomicMin(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMin<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMin(address, val);
}
// float/double min via the standard atomicCAS retry loop on the bit pattern.
template <>
inline __device__ float nd4j_atomicMin<float>(float* address, float val) {
int* address_as_ull = (int*)address;
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_min(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ double nd4j_atomicMin<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
// 64-bit unsigned atomic min. sm_35+ has a native unsigned long long
// atomicMin; older architectures fall back to a CAS retry loop.
template <>
inline __device__ uint64_t nd4j_atomicMin<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
// Seed the loop with the current value. The original seeded it with
// __double_as_longlong(val) — the bit pattern of (double)val — which is a
// meaningless first guess for an integer and wasted a CAS iteration.
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
// Signed 64-bit atomic min; CAS fallback below sm_35.
// NOTE(review): the sm_35 fast path uses the UNSIGNED atomicMin on a signed
// Nd4jLong — negative values compare incorrectly there; confirm callers never
// pass negative values on those architectures.
template <>
inline __device__ Nd4jLong nd4j_atomicMin<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
#if __CUDA_ARCH__ >= 350
return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = (unsigned long long)val, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
#endif
}
// WARNING(review): this 16-bit path is NOT atomic — it runs atomicMin on a
// local temp and does a plain store back; concurrent updates can be lost.
template <>
inline __device__ int16_t nd4j_atomicMin<int16_t>(int16_t* address, int16_t val) {
int32_t temp = *address;
*address = atomicMin(&temp, (int)val);
return *address;
}
// bfloat16/float16 min reuse the (non-atomic) int16 path on the raw bits.
// NOTE(review): comparing half-floats by their int16 bit pattern misorders
// negative values — confirm this is acceptable for the callers.
template <>
inline __device__ bfloat16 nd4j_atomicMin<bfloat16>(bfloat16* address, bfloat16 val) {
return bfloat16(nd4j_atomicMin<int16_t>(&address->_data, val._data));
}
template <>
inline __device__ float16 nd4j_atomicMin<float16>(float16* address, float16 val) {
return float16(nd4j_atomicMin<int16_t>(reinterpret_cast<int16_t*>(&address->data), (int16_t)val.data));
}
// 32-bit integer max maps onto the hardware atomicMax.
template <>
inline __device__ int32_t nd4j_atomicMax<int32_t>(int32_t* address, int32_t val) {
return atomicMax(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMax<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMax(address, val);
}
// float/double max via atomicCAS retry loop on the bit pattern.
template <>
inline __device__ double nd4j_atomicMax<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ float nd4j_atomicMax<float>(float* address, float val) {
int* address_as_ull = (int*)address;
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_max(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
// WARNING(review): the 8/16-bit min/max below are NOT atomic — they apply the
// atomic op to a stack temporary and write the result back with a plain store,
// so concurrent updates to *address can be lost.
template <>
inline __device__ uint8_t nd4j_atomicMin<uint8_t>(uint8_t* address, uint8_t val) {
uint32_t temp = *address;
*address = atomicMin(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMin<int8_t>(int8_t* address, int8_t val) {
int32_t temp = *address;
*address = atomicMin(&temp, (int)val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMin<uint16_t>(uint16_t* address, uint16_t val) {
uint32_t temp = *address;
*address = atomicMin(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicMax<uint8_t>(uint8_t* address, uint8_t val) {
uint32_t temp = *address;
*address = atomicMax(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMax<int8_t>(int8_t* address, int8_t val) {
int32_t temp = *address;
*address = atomicMax(&temp, (int)val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMax<uint16_t>(uint16_t* address, uint16_t val) {
uint32_t temp = *address;
*address = atomicMax(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ int16_t nd4j_atomicMax<int16_t>(int16_t* address, int16_t val) {
int32_t temp = *address;
*address = atomicMax(&temp, (int32_t)val);
return *address;
}
// 16-bit float max: CAS on the enclosing aligned 32-bit word. The half lives
// in either the high or low 16-bit lane depending on the address alignment;
// the untouched lane is carried through unchanged.
template <>
inline __device__ float16 nd4j_atomicMax<float16>(float16* address, float16 val) {
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
// Back up one element so the CAS word is 4-byte aligned.
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = nd4j_max((float16) old.B.H, val);
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = nd4j_max((float16) old.B.L, val);
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
// Same lane-aware CAS scheme for bfloat16.
template <>
inline __device__ bfloat16 nd4j_atomicMax<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
long addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = nd4j_max(old.B.H, val);
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = nd4j_max(old.B.L, val);
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
// 64-bit unsigned atomic max. sm_35+ has a native unsigned long long
// atomicMax; older architectures fall back to a CAS retry loop.
template <>
inline __device__ uint64_t nd4j_atomicMax<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMax((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
// Seed the loop with the current value. The original seeded it with
// __double_as_longlong(val) — the bit pattern of (double)val — which is a
// meaningless first guess for an integer and wasted a CAS iteration.
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_max((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
// Signed 64-bit atomic max via CAS loop (nd4j_max runs on the signed values,
// only the CAS exchange is done through the unsigned reinterpretation).
template <>
inline __device__ Nd4jLong nd4j_atomicMax<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long)nd4j_max(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
}
// Double-precision atomic add via CAS on the 64-bit bit pattern
// (pre-Pascal GPUs have no native double atomicAdd).
template <>
inline __device__ double nd4j_atomicAdd<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int *) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// Signed 64-bit atomic add via CAS loop.
template <>
inline __device__ Nd4jLong nd4j_atomicAdd<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
// Atomic add for `long` via a 64-bit CAS loop.
// NOTE(review): this assumes sizeof(long) == 8 (LP64); on LLP64 targets
// (Windows) `long` is 4 bytes and the 8-byte CAS would overrun — confirm this
// specialization is only compiled on LP64 platforms.
template <>
inline __device__ long nd4j_atomicAdd<long>(long* address, long val) {
unsigned long long* address_as_ull = (unsigned long long int *) address;
// return atomicAdd(address, val);
// `old`/`assumed` must be 64-bit: the original declared them
// `unsigned long int`, which is only 32 bits on LLP64 platforms and would
// truncate the value returned by the 64-bit atomicCAS.
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
// 32/64-bit unsigned adds map onto the hardware atomicAdd.
template <>
inline __device__ uint32_t nd4j_atomicAdd<uint32_t>(uint32_t* address, uint32_t val) {
return atomicAdd(address, val);
}
template <>
inline __device__ uint64_t nd4j_atomicAdd<uint64_t>(uint64_t* address, uint64_t val) {
// unsigned long long* address_as_ull = (unsigned long long int *) address;
//
//// return atomicAdd(address, val);
// unsigned long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed, val + assumed);
// } while (assumed != old);
// return old;
return (uint64_t)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
// half add: native __half atomicAdd on sm_70+/CUDA 10, otherwise a lane-aware
// CAS on the enclosing aligned 32-bit word (see nd4j_atomicMax<float16>).
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
#if __CUDA_ARCH__ >= 700 && defined(CUDA_10)
atomicAdd(reinterpret_cast<__half*>(address), val.data);
#else
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = ((float16) old.B.H) + val;
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = ((float16) old.B.L) + val;
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
#endif
}
// Same lane-aware CAS scheme for bfloat16 (no native atomic available).
template <>
inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
auto addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = old.B.H + val;
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = old.B.L + val;
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
// Generic 16-bit atomic add: CAS on the aligned 32-bit word that contains the
// target half-word, updating only the addressed lane.
// NOTE(review): lane selection via the H/L union members is endian-dependent;
// the seed values (pairOld.B.L = val etc.) are arbitrary first guesses — the
// loop converges because atomicCAS returns the actual word. Confirm on
// big-endian hosts if this path is ever compiled there.
template <typename T>
static inline __device__ T internal_16bit_atomicAdd(T* address, T val) {
// shift == 2 when the target half-word is the upper lane of the word.
size_t shift = ((size_t)address & 2);
int *base_address = (int *)((char*)address - shift);
union I16PAIR {
struct {
T H;
T L;
} B;
int W;
__host__ __device__
I16PAIR() {};
__host__ __device__
~I16PAIR() {};
};
I16PAIR pairNew, pairOld, pairAssumed;
if (reinterpret_cast<int*>(address) == base_address) {
// Aligned case: target occupies the H lane; L is carried through.
pairOld.B.L = val;
do {
pairNew.B.L = pairOld.B.L;
pairNew.B.H = pairOld.B.H + val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.H;
} else {
// Misaligned case: target occupies the L lane; H is carried through.
pairOld.B.H = val;
do {
pairNew.B.H = pairOld.B.H;
pairNew.B.L = pairOld.B.L + val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.L;
}
}
// 16-bit adds route through the CAS-based helper above.
template <>
inline __device__ int16_t nd4j_atomicAdd<int16_t>(int16_t* address, int16_t val) {
return internal_16bit_atomicAdd<int16_t>(address, val);
}
template <>
inline __device__ uint16_t nd4j_atomicAdd<uint16_t>(uint16_t* address, uint16_t val) {
return internal_16bit_atomicAdd<uint16_t>(address, val);
}
// WARNING(review): the 8-bit adds below are NOT atomic — the atomicAdd runs on
// a stack temporary and the result is written back with a plain store, so
// concurrent updates can be lost.
template <>
inline __device__ int8_t nd4j_atomicAdd<int8_t>(int8_t* address, int8_t val) {
int res = *address;
atomicAdd(&res, (int)val);
*address = res;
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicAdd<uint8_t>(uint8_t* address, uint8_t val) {
int res = *address;
atomicAdd(&res, (int)val);
*address = res;
return *address;
}
// WARNING(review): the bool add is a plain read-modify-write (no atomicity),
// and returns the NEW value rather than the old one.
template <>
inline __device__ bool nd4j_atomicAdd<bool>(bool* address, bool val) {
*address += (val);
return *address;
}
template <>
inline __device__ double nd4j_atomicSub<double>(double* address, double val) {
return nd4j_atomicAdd<double>(address, -val);
}
template <>
inline __device__ double nd4j_atomicMul<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val *
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ double nd4j_atomicDiv<double>(double* address, double val) {
return nd4j_atomicMul<double>(address, 1./val);
}
template <>
inline __device__ float nd4j_atomicAdd<float>(float* address, float val) {
return atomicAdd(address,val);
}
// Disabled variant kept for reference; the int32_t specialization below
// covers the same case via the native 32-bit atomicAdd.
//template <>
//inline __device__ int nd4j_atomicAdd<int>(int* address, int val) {
//  return atomicAdd(address, val);
//}

template <>
inline __device__ int32_t nd4j_atomicAdd<int32_t>(int32_t* address, int32_t val) {
    // Delegate to the native int atomicAdd; returns the pre-update value.
    return (int32_t)atomicAdd((int*)address, (int)val);
}
// Subtraction wrappers: each is atomic add of the negation.
template <>
inline __device__ float nd4j_atomicSub<float>(float* address, float val) {
    return nd4j_atomicAdd<float>(address, -val);
}

template <>
inline __device__ float16 nd4j_atomicSub<float16>(float16* address, float16 val) {
    return nd4j_atomicAdd<float16>(address, -val);
}

template <>
inline __device__ bfloat16 nd4j_atomicSub<bfloat16>(bfloat16* address, bfloat16 val) {
    return nd4j_atomicAdd<bfloat16>(address, -val);
}
// Atomic multiply for float: compare-and-swap on the 32-bit integer view of
// the value. Returns the value stored at *address BEFORE the update.
template <>
inline __device__ float nd4j_atomicMul<float>(float* address, float val) {
    int* address_as_int = (int*) address;
    int expected = *address_as_int;
    int seen;

    do {
        seen = expected;
        // Compute the would-be product from the value we believe is stored,
        // then publish it only if nobody raced us.
        float product = val * __int_as_float(seen);
        expected = atomicCAS(address_as_int, seen, __float_as_int(product));
    } while (seen != expected);

    return __int_as_float(expected);
}
// Atomic multiply for a single signed byte, done as a CAS loop on the
// aligned 32-bit word containing *address.
// __byte_perm(old, 0, 0x4440 | offset) copies the addressed byte into the
// low byte of a zeroed word; the (int8_t) cast reinterprets it as signed.
// The position-dependent selector then splices the product byte back in.
template <>
inline __device__ int8_t nd4j_atomicMul<int8_t>(int8_t* address, int8_t val) {
    unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
    unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};  // insert at byte 0..3
    unsigned int sel = selectors[(size_t)address & 3];
    unsigned int old, assumed, mul, new_;

    old = *base_address;
    do {
        assumed = old;
        mul = val * (int8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
        new_ = __byte_perm(old, mul, sel);
        // If the stored word would not change, skip the CAS entirely.
        if (new_ == old)
            break;
        old = atomicCAS(base_address, assumed, new_);
    } while (assumed != old);

    // NOTE(review): this truncates the whole OLD WORD to int8_t (byte 0),
    // which is the previous *address value only when the address is
    // word-aligned — confirm intended return semantics.
    return (int8_t)old;
}
// Unsigned-byte variant of the CAS-based byte multiply above; identical
// __byte_perm extract/insert scheme, unsigned interpretation of the byte.
template <>
inline __device__ unsigned char nd4j_atomicMul<unsigned char>(unsigned char* address, unsigned char val) {
    unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
    unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};  // insert at byte 0..3
    unsigned int sel = selectors[(size_t)address & 3];
    unsigned int old, assumed, mul, new_;

    old = *base_address;
    do {
        assumed = old;
        mul = val * (uint8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
        new_ = __byte_perm(old, mul, sel);
        // Skip the CAS if the stored word would be unchanged.
        if (new_ == old)
            break;
        old = atomicCAS(base_address, assumed, new_);
    } while (assumed != old);

    // NOTE(review): returns byte 0 of the OLD WORD, not necessarily the
    // previous *address byte for unaligned addresses — confirm intent.
    return (uint8_t)old;
}
// Atomic multiply for 16-bit types (int16/uint16/float16/bfloat16) built on
// a 32-bit atomicCAS over the aligned int containing *address. The I16PAIR
// union views that int as two 16-bit halves (B.H at the lower address).
// NOTE(review): pairOld.W is never seeded from *base_address before the
// first CAS, so the first compare uses a partly indeterminate word;
// correctness appears to rely on the retry path re-loading the real value
// from the failed CAS — confirm this is intended.
template <typename T>
static inline __device__ T internal_16bit_atomicMul(T* address, T val) {
    size_t shift = ((size_t)address & 2);  // 0: lower half of the word, 2: upper half
    int *base_address = (int *)((char*)address - shift);

    union I16PAIR {
        struct {
            T H;   // half at the lower address
            T L;   // half at the higher address
        } B;
        int W;     // the whole 32-bit word, as used by atomicCAS

        __host__ __device__
        I16PAIR() {};

        __host__ __device__
        ~I16PAIR() {};
    };

    I16PAIR pairNew, pairOld, pairAssumed;

    if (reinterpret_cast<int*>(address) == base_address) {
        // *address maps to member H; keep L, multiply into H.
        pairOld.B.L = val;  // initial guess only; reloaded on CAS failure
        do {
            pairNew.B.L = pairOld.B.L;
            pairNew.B.H = pairOld.B.H * val;
            pairAssumed.W = pairOld.W;
            pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
        } while (pairAssumed.W != pairOld.W);

        return (T) pairOld.B.H;
    } else {
        // *address maps to member L; keep H, multiply into L.
        pairOld.B.H = val;  // initial guess only; reloaded on CAS failure
        do {
            pairNew.B.H = pairOld.B.H;
            pairNew.B.L = pairOld.B.L * val;
            pairAssumed.W = pairOld.W;
            pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
        } while (pairAssumed.W != pairOld.W);

        return (T) pairOld.B.L;
    }
}
// 16-bit integer multiply specializations: delegate to the word-wide CAS
// helper above.
template <>
inline __device__ int16_t nd4j_atomicMul<int16_t>(int16_t* address, int16_t val) {
    return internal_16bit_atomicMul<int16_t>(address, val);
}

template <>
inline __device__ uint16_t nd4j_atomicMul<uint16_t>(uint16_t* address, uint16_t val) {
    return internal_16bit_atomicMul<uint16_t>(address, val);
}
// 32-bit integer multiply via CAS loop; returns the pre-update value.
template <>
inline __device__ int nd4j_atomicMul<int>(int* address, int val) {
    int* res_address = address;
    int old = *res_address, assumed;
    do {
        assumed = old;
        old = atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return old;
}

// Unsigned 32-bit variant of the same CAS multiply loop.
template <>
inline __device__ unsigned int nd4j_atomicMul<unsigned int>(unsigned int* address, unsigned int val) {
    unsigned int* res_address = address;
    unsigned int old = *res_address, assumed;
    do {
        assumed = old;
        old = atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return old;
}
// 64-bit multiplies: atomicCAS only takes unsigned long long*, so the
// pointer is reinterpreted; returns the pre-update value.
template <>
inline __device__ int64_t nd4j_atomicMul<int64_t>(int64_t* address, int64_t val) {
    unsigned long long int* res_address = (unsigned long long int*)address;
    unsigned long long int old = *res_address, assumed;
    do {
        assumed = old;
        old = atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return (int64_t)old;
}

template <>
inline __device__ uint64_t nd4j_atomicMul<uint64_t>(uint64_t* address, uint64_t val) {
    unsigned long long int* res_address = (unsigned long long int*)address;
    unsigned long long int old = *res_address, assumed;
    do {
        assumed = old;
        old = atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return (uint64_t)old;
}
#if !defined(_WIN32) && !defined(_WIN64)
// Guarded out on Windows — presumably Nd4jLong aliases a type that already
// has a specialization there, which would make this a redefinition; confirm.
template <>
inline __device__ Nd4jLong nd4j_atomicMul<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
    unsigned long long int* res_address = (unsigned long long*)address;
    unsigned long long int old = *res_address, assumed;
    do {
        assumed = old;
        old = atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return (Nd4jLong)old;
}
#endif
// 16-bit float multiplies: delegate to the word-wide CAS helper.
template <>
inline __device__ bfloat16 nd4j_atomicMul<bfloat16>(bfloat16* address, bfloat16 val) {
    return internal_16bit_atomicMul<bfloat16>(address, val);
}

template <>
inline __device__ float16 nd4j_atomicMul<float16>(float16* address, float16 val) {
    return internal_16bit_atomicMul<float16>(address, val);
}

// Divisions expressed as multiplication by the reciprocal.
// NOTE(review): the reciprocal is rounded before multiplying, so results can
// differ from true division in the last ulp — confirm the tolerance.
template <>
inline __device__ float nd4j_atomicDiv<float>(float* address, float val) {
    return nd4j_atomicMul<float>(address, 1.f / val);
}

template <>
inline __device__ float16 nd4j_atomicDiv<float16>(float16* address, float16 val) {
    // Cast binds tighter than '/': this divides float16(1) by val in float16.
    return internal_16bit_atomicMul<float16>(address, (float16) 1.f / val);
}

template <>
inline __device__ bfloat16 nd4j_atomicDiv<bfloat16>(bfloat16* address, bfloat16 val) {
    // Same precedence note: bfloat16(1) / val computed in bfloat16.
    return internal_16bit_atomicMul<bfloat16>(address, (bfloat16) 1 / val);
}
}
#endif
}
}
#ifdef _OPENMP
#ifndef MAX_FLOAT
#define MAX_FLOAT 1e37
#endif

// Custom OpenMP reductions over the nd4j math helpers.
// *TF variants (floating point only) are seeded with -/+MAX_FLOAT so any
// real input beats the identity.
#pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \
omp_out = sd::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)

#pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \
omp_out = sd::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)

// NOTE(review): maxT/minT seed omp_priv with 0. For minT that makes 0 the
// floor of any reduction over positive values (and for maxT the ceiling of
// an all-negative reduction) — presumably callers only reduce data for which
// 0 is a safe identity; confirm.
#pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=0)

#pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=0)

// Absolute-value max/min; no initializer clause, so omp_priv is
// default-initialised.
#pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_max(sd::math::nd4j_abs(omp_in), sd::math::nd4j_abs(omp_out)) )

#pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_min(sd::math::nd4j_abs(omp_in), sd::math::nd4j_abs(omp_out)) )

// Sum of absolute values, plain sum (identity 0), and product (identity 1).
#pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = sd::math::nd4j_abs(omp_in) + sd::math::nd4j_abs(omp_out))\
initializer (omp_priv=0)

#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)

#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in * omp_out)\
initializer (omp_priv=1)

#endif

#endif /* TEMPLATEMATH_H_ */
|
collective_scatterGather.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Implements the scatter and gather mixed mode OpenMP/MPI */
/* benchmarks. */
/*-----------------------------------------------------------*/
#include "collective_scatterGather.h"

#include <stdio.h>
#include <stdlib.h>
/*-----------------------------------------------------------*/
/* scatterGather                                             */
/*                                                           */
/* Driver routine for the scatter AND gather benchmarks;     */
/* benchmarkType selects which collective is measured.       */
/* For each data size: allocate buffers, warm up, verify,    */
/* then time repeated kernel runs until the target time is   */
/* reached, and report from rank 0.                          */
/*-----------------------------------------------------------*/
int scatterGather(int benchmarkType) {
    int dataSizeIter, bufferSize;

    /* Initialise repsToDo to defaultReps */
    repsToDo = defaultReps;

    /* Start loop over data sizes */
    dataSizeIter = minDataSize; /* initialise dataSizeIter */

    while (dataSizeIter <= maxDataSize) {
        /* Each MPI process handles dataSizeIter ints per OpenMP thread. */
        bufferSize = dataSizeIter * numThreads;

        if (benchmarkType == SCATTER) {
            allocateScatterGatherData(bufferSize, benchmarkType);
            /* perform benchmark warm-up */
            scatterKernel(warmUpIters, dataSizeIter);
            /* Test if scatter was successful */
            testScatterGather(bufferSize, benchmarkType);
        } else if (benchmarkType == GATHER) {
            allocateScatterGatherData(bufferSize, benchmarkType);
            /* Perform benchmark warm-up */
            gatherKernel(warmUpIters, dataSizeIter);
            /* Only the root holds the gathered result, so only it verifies. */
            if (myMPIRank == GATHERROOT) {
                testScatterGather(bufferSize * numMPIprocs, benchmarkType);
            }
        }

        /* Initialise the benchmark */
        benchComplete = FALSE;

        /* Execute benchmark until target time is reached */
        while (benchComplete != TRUE) {
            /* Synchronise so all ranks start the timed section together. */
            MPI_Barrier(comm);
            startTime = MPI_Wtime();

            if (benchmarkType == SCATTER) {
                /* Execute scatter for repsToDo repetitions */
                scatterKernel(repsToDo, dataSizeIter);
            } else if (benchmarkType == GATHER) {
                /* Execute gather for repsToDo repetitions */
                gatherKernel(repsToDo, dataSizeIter);
            }

            /* Stop timer */
            MPI_Barrier(comm);
            finishTime = MPI_Wtime();
            totalTime = finishTime - startTime;

            /* Test if target time was reached (decided by rank 0 only) */
            if (myMPIRank == 0) { benchComplete = repTimeCheck(totalTime, repsToDo); }
            /* Ensure all procs have the same value of benchComplete */
            /* and repsToDo */
            MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
            MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
        }

        /* Master process sets benchmark result for reporting */
        if (myMPIRank == 0) {
            setReportParams(dataSizeIter, repsToDo, totalTime);
            printReport();
        }

        /* Free allocated data */
        freeScatterGatherData(benchmarkType);

        /* Double data size and loop again */
        dataSizeIter = dataSizeIter * 2;
    }

    return 0;
}
/*-----------------------------------------------------------*/
/* scatterKernel                                             */
/*                                                           */
/* Implements the scatter benchmark.                         */
/* Root process first scatters send buffer to other          */
/* processes.                                                */
/* Each thread under a MPI process then reads its portion    */
/* of scatterRecvBuf.                                        */
/*-----------------------------------------------------------*/
int scatterKernel(int totalReps, int dataSize) {
    int repIter, i;
    int totalSendBufElems, sendCount, recvCount;

    /* Total elements the root sends: one chunk per thread per process. */
    totalSendBufElems = numMPIprocs * numThreads * dataSize;

    /* Each process receives one dataSize chunk per thread. */
    sendCount = dataSize * numThreads;
    recvCount = sendCount;

    for (repIter = 0; repIter < totalReps; repIter++) {
        /* Master process writes to scatterSendBuf */
        if (myMPIRank == SCATTERROOT) {
            for (i = 0; i < totalSendBufElems; i++) {
                scatterSendBuf[i] = SCATTERSTARTVAL + i;
            }
        }

        /* Scatter the data to other processes */
        MPI_Scatter(scatterSendBuf, sendCount, MPI_INT, scatterRecvBuf, recvCount,
                    MPI_INT, SCATTERROOT, comm);

        /* Each thread now reads its portion of scatterRecvBuf.
         * schedule(static, dataSize) hands each thread contiguous
         * dataSize-element chunks, matching the per-thread layout. */
#pragma omp parallel for default(none) private(i) \
    shared(dataSize, recvCount, finalBuf, scatterRecvBuf) \
    schedule(static, dataSize)
        for (i = 0; i < recvCount; i++) { /* loop over all data in recv buffer */
            finalBuf[i] = scatterRecvBuf[i];
        }
    } /* End of loop over reps */

    return 0;
}
/*-----------------------------------------------------------*/
/* gatherKernel                                              */
/*                                                           */
/* Implements the gather benchmark.                          */
/* Each thread writes part of its buffer then all data       */
/* is gathered to the master process.                        */
/*-----------------------------------------------------------*/
int gatherKernel(int totalReps, int dataSize) {
    int repIter, i;
    int totalRecvBufElems, sendCount, recvCount;
    int startVal;

    /* Elements the root receives: one chunk per thread per process. */
    totalRecvBufElems = dataSize * numThreads * numMPIprocs;

    /* Each process calculates its send and recv count */
    sendCount = dataSize * numThreads;
    recvCount = sendCount;

    /* Calculate startVal for each process.
     * This is used to find the values of gatherSendBuf; it is offset by
     * rank so the gathered buffer forms one contiguous ascending sequence.
     */
    startVal = (myMPIRank * sendCount) + GATHERSTARTVAL;

    for (repIter = 0; repIter < totalReps; repIter++) {
        /* Each thread writes to its portion of gatherSendBuf.
         * schedule(static, dataSize) hands out contiguous
         * dataSize-element chunks per thread. */
#pragma omp parallel for default(none) private(i) \
    shared(gatherSendBuf, startVal, dataSize, sendCount) \
    schedule(static, dataSize)
        for (i = 0; i < sendCount; i++) { gatherSendBuf[i] = startVal + i; }

        /* Gather the data to GATHERROOT */
        MPI_Gather(gatherSendBuf, sendCount, MPI_INT, gatherRecvBuf, recvCount,
                   MPI_INT, GATHERROOT, comm);

        /* GATHERROOT process then copies its received data
         * to finalBuf.
         */
        if (myMPIRank == GATHERROOT) {
            for (i = 0; i < totalRecvBufElems; i++) {
                finalBuf[i] = gatherRecvBuf[i];
            }
        }
    }

    return 0;
}
/* Abort with a message if an allocation fails: the benchmark cannot
 * continue with a NULL buffer (the old code never checked malloc).
 * NOTE(review): exit() can leave other MPI ranks blocked in a collective;
 * MPI_Abort(comm, ...) may be preferable — confirm desired failure mode. */
static int *checkedAllocInts(size_t numElems) {
    int *buf = malloc(numElems * sizeof *buf);
    if (buf == NULL) {
        fprintf(stderr, "ERROR: unable to allocate %zu ints\n", numElems);
        exit(EXIT_FAILURE);
    }
    return buf;
}

/*-----------------------------------------------------------*/
/* allocateScatterGatherData                                 */
/*                                                           */
/* Allocate memory for main data arrays; aborts the run if   */
/* any allocation fails.                                     */
/*-----------------------------------------------------------*/
int allocateScatterGatherData(int bufferSize, int benchmarkType) {
    if (benchmarkType == SCATTER) {
        /* scatterSendBuf is size (bufferSize * numMPIprocs) */
        if (myMPIRank == SCATTERROOT) {
            scatterSendBuf = checkedAllocInts((size_t)bufferSize * numMPIprocs);
        }
        scatterRecvBuf = checkedAllocInts((size_t)bufferSize);
        finalBuf = checkedAllocInts((size_t)bufferSize);
    } else if (benchmarkType == GATHER) {
        gatherSendBuf = checkedAllocInts((size_t)bufferSize);
        /* Only the root needs the gathered and verification buffers. */
        if (myMPIRank == GATHERROOT) {
            gatherRecvBuf = checkedAllocInts((size_t)bufferSize * numMPIprocs);
            finalBuf = checkedAllocInts((size_t)bufferSize * numMPIprocs);
        }
    }

    return 0;
}
/*-----------------------------------------------------------*/
/* freeScatterGatherData                                     */
/*                                                           */
/* Release the buffers set up by                             */
/* allocateScatterGatherData for the given benchmark type.   */
/*-----------------------------------------------------------*/
int freeScatterGatherData(int benchmarkType) {
    switch (benchmarkType) {
    case SCATTER:
        /* Only the root owns the scatter send buffer. */
        if (myMPIRank == SCATTERROOT) {
            free(scatterSendBuf);
        }
        free(scatterRecvBuf);
        free(finalBuf);
        break;
    case GATHER:
        free(gatherSendBuf);
        /* Only the root allocated the gather receive/final buffers. */
        if (myMPIRank == GATHERROOT) {
            free(gatherRecvBuf);
            free(finalBuf);
        }
        break;
    default:
        /* Unknown benchmark type: nothing was allocated. */
        break;
    }
    return 0;
}
/*-----------------------------------------------------------*/
/* testScatterGather                                         */
/*                                                           */
/* Verifies that the scatter and gather benchmarks worked    */
/* correctly by rebuilding the expected buffer and comparing */
/* it element-wise with finalBuf.                            */
/*-----------------------------------------------------------*/
int testScatterGather(int sizeofBuffer, int benchmarkType) {
    int i, startVal;
    int testFlag, reduceFlag;
    int *testBuf;

    /* Initialise testFlag to true */
    testFlag = TRUE;

    /* Allocate space for testBuf (previously unchecked). */
    testBuf = (int *)malloc(sizeofBuffer * sizeof(int));
    if (testBuf == NULL) {
        fprintf(stderr, "ERROR: unable to allocate test buffer\n");
        exit(EXIT_FAILURE);
    }

    if (benchmarkType == SCATTER) {
        /* Find the start scatter value for each MPI process */
        startVal = (myMPIRank * sizeofBuffer) + SCATTERSTARTVAL;
    } else if (benchmarkType == GATHER) {
        /* startVal is GATHERSTARTVAL */
        startVal = GATHERSTARTVAL;
    } else {
        /* FIX: startVal was previously read uninitialised for an unknown
         * benchmarkType (undefined behaviour); default it defensively. */
        startVal = 0;
    }

    /* Fill testBuf with correct values */
    for (i = 0; i < sizeofBuffer; i++) { testBuf[i] = startVal + i; }

    /* Compare each element of finalBuf with testBuf */
    for (i = 0; i < sizeofBuffer; i++) {
        if (finalBuf[i] != testBuf[i]) {
            testFlag = FALSE;
            break; /* one mismatch is enough to fail */
        }
    }

    /* For scatter: reduce testFlag into master with
     * logical AND operator.
     */
    if (benchmarkType == SCATTER) {
        MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);
        /* Master then sets testOutcome using reduceFlag */
        if (myMPIRank == 0) { setTestOutcome(reduceFlag); }
    } else if (benchmarkType == GATHER) {
        /* Gather is only verified on the root, which calls this directly. */
        setTestOutcome(testFlag);
    }

    free(testBuf);
    return 0;
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 3x3 stride-1 convolution, scalar fallback implementation.
// Accumulates every input channel's contribution into the (bias-initialised)
// output channel. Processes two output rows per outer iteration so the
// middle input rows (r1, r2) are read once but used for both output rows.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // One output channel per OpenMP task; channels are independent.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw; // second output row of the pair

            const float* img0 = bottom_blob.channel(q);

            // 3x3 kernel for (output channel p, input channel q).
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            // Four consecutive input rows feed two output rows.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;

            const float* k0 = kernel0;     // kernel row 0
            const float* k1 = kernel0 + 3; // kernel row 1
            const float* k2 = kernel0 + 6; // kernel row 2

            int i = 0;
            // Main loop: two output rows at a time.
            for (; i + 1 < outh; i += 2)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;  // output row i
                    float sum2 = 0; // output row i + 1

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    // Row i+1 reuses r1/r2 shifted one kernel row up.
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 2-pixel border plus one extra row (two rows consumed).
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // Tail: remaining single output row (odd outh).
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2; // skip the 2-pixel right border
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
// Transform 3x3 kernels into the Winograd F(2,3) domain (4x4 tiles) and
// repack them for the dot stage: kernel_tm holds U = G * g * G^T per
// (outch, inch) pair; kernel_tm2 is the interleaved layout consumed by
// convolution_winograd_dot_sse.
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm2, int inch, int outch, const Option& opt)
{
    Mat kernel_tm(4 * 4, inch, outch);

    // G: the Winograd F(2,3) kernel-transform matrix.
    const float ktm[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h = G * g  (4x3 intermediate)
            float tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T  (4x4 result)
            for (int j = 0; j < 4; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 16-inch-outch
    // dst = inch-16-outch
#if __SSE2__
    // Pack output channels in groups of 8, then 4, then singles.
    kernel_tm2.create(8 * inch, 16, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    kernel_tm2.create(inch, 16, outch);
#endif

    int q = 0;
#if __SSE2__
    for (; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm2.channel(q / 8);

        for (int k = 0; k < 16; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                // Interleave 8 consecutive output channels element-wise.
                for (int i = 0; i < 8; i++)
                {
                    const float* k00 = kernel_tm.channel(q + i).row(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
    for (; q + 3 < outch; q += 4)
    {
        Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4);

        for (int k = 0; k < 16; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                for (int i = 0; i < 4; i++)
                {
                    const float* k00 = kernel_tm.channel(q + i).row(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
#endif
    // Remaining single output channels.
    for (; q < outch; q++)
    {
#if __SSE2__
        Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
        Mat g0 = kernel_tm2.channel(q);
#endif

        for (int k = 0; k < 16; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                const float* k00 = kernel_tm.channel(q).row(p);
                g00[0] = k00[k];
                g00++;
            }
        }
    }
}
// 3x3 stride-1 convolution via Winograd F(2,3): pad the input to whole 2x2
// output tiles, transform input tiles, multiply with the transformed
// kernels, transform back, then crop to the requested output size.
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 1) / 2 * 2; // round output up to whole 2x2 tiles
    outh = (outh + 1) / 2 * 2;

    w = outw + 2; // +2 border for the 3x3 kernel
    h = outh + 2;

    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 2;
        int h_tiles = outh / 2;
        int tiles = w_tiles * h_tiles;

        // 16 = 4x4 transformed values per tile.
        bottom_blob_tm.create(tiles, 16, inch, 4u, opt.workspace_allocator);
        conv3x3s1_winograd23_transform_input_sse(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat(); // release the padded copy early
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_sse(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        // No rounding happened: write straight into the caller's blob.
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd23_transform_output_sse(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Transform 3x3 kernels into the Winograd F(4,3) domain (6x6 tiles) and
// repack for the dot stage — same structure as the F(2,3) variant above,
// with a 6x3 transform matrix and 36 values per tile.
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm2, int inch, int outch, const Option& opt)
{
    Mat kernel_tm(6 * 6, inch, outch);

    // G: the Winograd F(4,3) kernel-transform matrix.
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h = G * g  (6x3 intermediate)
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T  (6x6 result)
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = inch-36-outch
#if __SSE2__
    // Pack output channels in groups of 8, then 4, then singles.
    kernel_tm2.create(8 * inch, 36, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    kernel_tm2.create(inch, 36, outch);
#endif

    int q = 0;
#if __SSE2__
    for (; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm2.channel(q / 8);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                // Interleave 8 consecutive output channels element-wise.
                for (int i = 0; i < 8; i++)
                {
                    const float* k00 = kernel_tm.channel(q + i).row(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
    for (; q + 3 < outch; q += 4)
    {
        Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                for (int i = 0; i < 4; i++)
                {
                    const float* k00 = kernel_tm.channel(q + i).row(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
#endif
    // Remaining single output channels.
    for (; q < outch; q++)
    {
#if __SSE2__
        Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
        Mat g0 = kernel_tm2.channel(q);
#endif

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                const float* k00 = kernel_tm.channel(q).row(p);
                g00[0] = k00[k];
                g00++;
            }
        }
    }
}
// 3x3 stride-1 convolution via Winograd F(4,3): pad the input to whole 4x4
// output tiles, transform, dot with the transformed kernels, transform back,
// then crop to the requested output size.
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4; // round output up to whole 4x4 tiles
    outh = (outh + 3) / 4 * 4;

    w = outw + 2; // +2 border for the 3x3 kernel
    h = outh + 2;

    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 4;
        int h_tiles = outh / 4;
        int tiles = w_tiles * h_tiles;

        // 36 = 6x6 transformed values per tile.
        bottom_blob_tm.create(tiles, 36, inch, 4u, opt.workspace_allocator);
        conv3x3s1_winograd43_transform_input_sse(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat(); // release the padded copy early
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_sse(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        // No rounding happened: write straight into the caller's blob.
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd43_transform_output_sse(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// 3x3 stride-2 convolution, scalar fallback implementation.
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After outw stride-2 steps we have advanced 2*outw along the input row;
    // tailstep jumps to the start of the next input row pair.
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // One output channel per OpenMP task; channels are independent.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* img = bottom_blob.channel(q);

            // 3x3 kernel for (output channel p, input channel q).
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            // Three consecutive input rows feed one output row.
            const float* r0 = img;
            const float* r1 = img + w;
            const float* r2 = img + w * 2;

            const float* k0 = kernel0;     // kernel row 0
            const float* k1 = kernel0 + 3; // kernel row 1
            const float* k2 = kernel0 + 6; // kernel row 2

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0 += 2; // horizontal stride 2
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }

                r0 += tailstep; // vertical stride 2: skip to the next row pair
                r1 += tailstep;
                r2 += tailstep;
            }
        }
    }
}
|
version3_1.c | // Compile with:
//
//
// To specify the number of bodies in the world, the program optionally accepts
// an integer as its first command line argument.
#include <time.h>
#include <sys/times.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <X11/Xlib.h>
#include <unistd.h>
#include "omp.h"
#define WIDTH 1024
#define HEIGHT 768
// default number of bodies
#define DEF_NUM_BODIES 2000
// gravitational constant
#define GRAV 10.0
// initial velocities are scaled by this value
#define V_SCALAR 20.0
// initial masses are scaled by this value
#define M_SCALAR 5.0
// radius scalar
#define R_SCALAR 3
// coefficient of restitution determines the elasticity of a collision: C_REST = [0,1]
// if C_REST = 0 -> perfectly inelastic (particles stick together)
// if C_REST = 1 -> perfectly elastic (no loss of speed)
#define C_REST 0.5
// set the iteration times
#define iteration_times 100
// Must set 0 if run on Pi
#define NOT_RUN_ON_PI 1
#define TREERATIO 3
// A single particle in the simulation.
struct body {
    double x, y;   // position
    double vx, vy; // velocity
    double ax, ay; // acceleration
    double m;      // mass
    double r;      // radius of the particle
};

// The whole simulated system: a dynamically allocated array of bodies.
struct world {
    struct body *bodies;
    int num_bodies;
};

// Accumulated compute time in clock ticks (see clock(3)).
clock_t total_time = 0;
/* Return the larger of two doubles. */
double max (double a, double b)
{
    if (a > b) {
        return a;
    }
    return b;
}

/* Return the smaller of two doubles. */
double min (double a, double b)
{
    if (a < b) {
        return a;
    }
    return b;
}
// Barnes-Hut quadtree node. A node is a leaf while bodyp != NULL; when a
// second body lands in its region, insertbody() pushes the resident body
// into a child and clears bodyp, making this an interior node.
struct node
{
    struct body * bodyp;     // body stored here (leaf nodes only)
    struct node * q1;        // children, one per quadrant (NULL if empty)
    struct node * q2;
    struct node * q3;
    struct node * q4;
    double totalmass;        // total mass of all bodies in this subtree
    double centerx, centery; // centre of mass of the subtree
    double xmin, xmax;       // bounding box of this node's region
    double ymin, ymax;
    double diag;             // diagonal length of the bounding box
};
enum quadrant { q1, q2, q3, q4 };

/* Return which quadrant of the rectangle [xmin,xmax] x [ymin,ymax] the
 * point (x,y) falls in: q1 = upper right, q2 = upper left, q3 = lower left,
 * q4 = lower right. Points exactly on a midline go to the lower/left side. */
enum quadrant getquadrant(double x, double y, double xmin, double xmax, double ymin, double ymax)
{
    /* Keep the exact midpoint formulas for bit-identical behaviour. */
    const double midx = xmin + 0.5*(xmax - xmin);
    const double midy = ymin + 0.5*(ymax - ymin);
    const int isRight = (x > midx);

    if (y > midy)
        return isRight ? q1 : q2;
    return isRight ? q4 : q3;
}
/* Allocate and initialise a leaf quadtree node holding bodyp, covering the
 * region [xmin,xmax] x [ymin,ymax]. The node's aggregate mass and centre of
 * mass start as the body's own. Returns NULL on allocation failure (the
 * message previously went to stdout without a newline); callers should
 * check the result. */
struct node * createnode(struct body * bodyp, double xmin, double xmax, double ymin, double ymax)
{
    struct node * rootnode = malloc(sizeof *rootnode);
    if (rootnode == NULL)
    {
        /* Report on stderr so the message survives stdout redirection. */
        fprintf(stderr, "Unable to allocate node, exit\n");
        return NULL;
    }
    rootnode->totalmass = bodyp->m;
    rootnode->centerx = bodyp->x;
    rootnode->centery = bodyp->y;
    rootnode->xmin = xmin;
    rootnode->xmax = xmax;
    rootnode->ymin = ymin;
    rootnode->ymax = ymax;
    /* Diagonal of the region, used elsewhere for the opening criterion. */
    rootnode->diag = sqrt(( pow(xmax - xmin, 2) + pow(ymax - ymin, 2) ));
    rootnode->bodyp = bodyp;
    rootnode->q1 = NULL;
    rootnode->q2 = NULL;
    rootnode->q3 = NULL;
    rootnode->q4 = NULL;
    return rootnode;
}
/* Fold bodyp into nodep's aggregates: the centre of mass becomes the
 * mass-weighted average of the old centre and the body's position, and the
 * total mass grows by the body's mass. */
void updatecenterofmass(struct node * nodep, struct body * bodyp)
{
    const double combined = nodep->totalmass + bodyp->m;
    nodep->centerx = (nodep->totalmass*nodep->centerx + bodyp->m*bodyp->x) / combined;
    nodep->centery = (nodep->totalmass*nodep->centery + bodyp->m*bodyp->y) / combined;
    nodep->totalmass = combined;
}
/* Insert a body into the quadtree rooted at nodep.
 * If nodep is currently a leaf (bodyp != NULL) it is first converted into a
 * branch by pushing its resident body down into the matching child quadrant.
 * The new body is then placed into its own quadrant — a fresh leaf if that
 * quadrant is empty, otherwise recursing — and nodep's aggregate mass and
 * centre of mass are updated on the way. */
void insertbody(struct body * insbody, struct node * nodep)
{
    enum quadrant existingquad, newquad;
    double xmid, ymid;

    xmid = nodep->xmin + 0.5*(nodep->xmax - nodep->xmin);
    ymid = nodep->ymin + 0.5*(nodep->ymax - nodep->ymin);

    /* Leaf: convert to a branch by moving the stored body into a child. */
    if(nodep->bodyp != NULL)
    {
        existingquad = getquadrant(nodep->bodyp->x, nodep->bodyp->y, nodep->xmin, nodep->xmax, nodep->ymin, nodep->ymax);
        switch (existingquad)
        {
            case q1:
                nodep->q1 = createnode(nodep->bodyp, xmid, nodep->xmax, ymid, nodep->ymax);
                break;
            case q2:
                nodep->q2 = createnode(nodep->bodyp, nodep->xmin, xmid, ymid, nodep->ymax);
                break;
            case q3:
                nodep->q3 = createnode(nodep->bodyp, nodep->xmin, xmid, nodep->ymin, ymid);
                break;
            case q4:
                nodep->q4 = createnode(nodep->bodyp, xmid, nodep->xmax, nodep->ymin, ymid);
                break;
        }
        nodep->bodyp = NULL; /* nodep is now an interior node */
    }

    newquad = getquadrant(insbody->x, insbody->y, nodep->xmin, nodep->xmax, nodep->ymin, nodep->ymax);
    updatecenterofmass(nodep,insbody);

    /* Place the new body: create a leaf if the target quadrant is empty,
     * otherwise recurse deeper into the tree.
     * NOTE(review): createnode() can return NULL on allocation failure and
     * the result is never checked here — confirm intended behaviour. */
    switch (newquad)
    {
        case q1:
            if(nodep->q1 == NULL)
            {
                nodep->q1 = createnode(insbody, xmid, nodep->xmax, ymid, nodep->ymax);
            } else {
                insertbody(insbody,nodep->q1);
            }
            break;
        case q2:
            if(nodep->q2 == NULL)
            {
                nodep->q2 = createnode(insbody, nodep->xmin, xmid, ymid, nodep->ymax);
            } else {
                insertbody(insbody,nodep->q2);
            }
            break;
        case q3:
            if(nodep->q3 == NULL)
            {
                nodep->q3 = createnode(insbody, nodep->xmin, xmid, nodep->ymin, ymid);
            } else {
                insertbody(insbody,nodep->q3);
            }
            break;
        case q4:
            if(nodep->q4 == NULL)
            {
                nodep->q4 = createnode(insbody, xmid, nodep->xmax, nodep->ymin, ymid);
            } else {
                insertbody(insbody,nodep->q4);
            }
            break;
    }
}
/* Accumulate into bodyp->ax / bodyp->ay the gravitational acceleration
   exerted on bodyp by the bodies in the subtree rooted at nodep, using the
   Barnes-Hut approximation.
   A node is treated as a single point mass when either it is far enough
   away (distance / cell diagonal exceeds ratiothreshold) or it is a leaf;
   otherwise the children are visited recursively.  The node holding bodyp
   itself is excluded.  Distances below 25 are clamped (softening) to avoid
   huge accelerations at close range; 10.0 appears to be the
   simulation-scaled gravitational constant. */
void treesum(struct node * nodep, struct body * bodyp, double ratiothreshold )
{
    double dx, dy, r, rsqr; /* x distance, y distance, distance, distance^2 */
    double accel;

    dx = nodep->centerx - bodyp->x;
    dy = nodep->centery - bodyp->y;
    rsqr = pow(dx,2) + pow(dy,2);
    r = sqrt(rsqr);
    if(r < 25){
        r = 25; /* softening: clamp minimum separation */
    }
    /* Opening criterion: far-away cell or leaf -> point-mass approximation. */
    if( (((r/nodep->diag) > ratiothreshold) || (nodep->bodyp))&&(nodep->bodyp!=bodyp) )
    {
        accel = (10.0) * nodep->totalmass / r/r/r; /* G*M/r^3; multiplied by dx,dy below */
        bodyp->ax += accel*dx;
        bodyp->ay += accel*dy;
    } else {
        /* Too close for the approximation: descend into occupied children. */
        if(nodep->q1) { treesum(nodep->q1, bodyp, ratiothreshold); }
        if(nodep->q2) { treesum(nodep->q2, bodyp, ratiothreshold); }
        if(nodep->q3) { treesum(nodep->q3, bodyp, ratiothreshold); }
        if(nodep->q4) { treesum(nodep->q4, bodyp, ratiothreshold); }
    }
    return;
}
/* Recursively free every node in the subtree rooted at nodep.
   Safe to call with NULL.  Bodies referenced by leaves are NOT freed;
   they are owned by the world, not the tree. */
void destroytree(struct node * nodep)
{
    if (nodep == NULL)
        return;
    /* Post-order: children first, then this node. */
    destroytree(nodep->q1);
    destroytree(nodep->q2);
    destroytree(nodep->q3);
    destroytree(nodep->q4);
    free(nodep);
}
/* This function initializes each particle's mass, velocity and position */
/* Rejection-samples num_bodies positions uniformly inside the disc
   inscribed in the WIDTH x HEIGHT window, gives each body a tangential
   (swirling) initial velocity, a mass drawn from 1/(0.025+U) scaled by
   M_SCALAR, and a radius proportional to sqrt(mass).
   Caller owns the returned struct (bodies array and struct itself).
   NOTE(review): malloc results are unchecked, and rc could in principle be
   0 (body exactly at the center), dividing by zero — TODO confirm. */
struct world* create_world(int num_bodies) {
struct world *world = malloc(sizeof(struct world));
world->num_bodies = num_bodies;
world->bodies = malloc(sizeof(struct body)*num_bodies);
int i = 0;
double x;
double y;
double rc; /* distance from the window center */
int min_dim = (WIDTH < HEIGHT) ? WIDTH : HEIGHT;
while (i<num_bodies) {
x = drand48() * WIDTH;
y = drand48() * HEIGHT;
rc = sqrt((WIDTH/2-x)*(WIDTH/2-x) + (y-HEIGHT/2)*(y-HEIGHT/2));
/* Keep only samples inside the inscribed disc (rejection sampling). */
if (rc <= min_dim/2) {
world->bodies[i].x = x;
world->bodies[i].y = y;
/* Velocity perpendicular to the radius vector -> circular swirl. */
world->bodies[i].vx = V_SCALAR * (y-HEIGHT/2) / rc;
world->bodies[i].vy = V_SCALAR * (WIDTH/2-x) / rc;
world->bodies[i].ax = 0;
world->bodies[i].ay = 0;
world->bodies[i].m = (1 / (0.025 + drand48())) * M_SCALAR;
/* Radius such that the drawn area is proportional to the mass. */
world->bodies[i].r = sqrt(world->bodies[i].m / M_PI) * R_SCALAR;
i++;
}
}
return world;
}
// set the foreground color given RGB values between 0..255.
void set_color(Display *disp, GC gc, int r, int g, int b){
    unsigned long int p;

    /* Clamp each channel into the displayable [0, 255] range. */
    r = (r < 0) ? 0 : ((r > 255) ? 255 : r);
    g = (g < 0) ? 0 : ((g > 255) ? 255 : g);
    b = (b < 0) ? 0 : ((b > 255) ? 255 : b);
    /* Pack as a 24-bit 0xRRGGBB pixel value. */
    p = (r << 16) | (g << 8) | (b) ;
    XSetForeground(disp, gc, p) ;
}
/* This function updates the screen with the new positions of each particle */
/* Clears the back buffer to white, then draws every body as a gray filled
   circle with a black outline.  (x, y) passed to the Arc calls is the
   top-left corner of the bounding square, per Xlib conventions; the
   double coordinates are implicitly truncated to int at the call. */
void draw_world(Display *disp, Pixmap back_buf, GC gc, struct world *world) {
int i;
double x, y, r, r2;
// we turn off aliasing for faster draws
set_color(disp, gc, 255, 255, 255);
XFillRectangle(disp, back_buf, gc, 0, 0, WIDTH, HEIGHT);
for (i = 0; i < world->num_bodies; i++) {
r = world->bodies[i].r;
x = world->bodies[i].x - r; /* bounding-box corner = center - radius */
y = world->bodies[i].y - r;
r2 = r + r; /* bounding-box side = diameter */
// draw body
set_color(disp, gc, 255*7/10, 255*7/10, 255*7/10);
XFillArc(disp, back_buf, gc, x, y, r2, r2, 0, 360*64); /* 360*64: full circle in 1/64 degree units */
set_color(disp, gc, 0, 0, 0);
XDrawArc(disp, back_buf, gc, x, y, r2, r2, 0, 360*64);
}
}
/* Handle collisions with the window borders: any body whose circle crosses
   an edge is clamped back inside and, if it was still moving outward, its
   velocity component is reflected and damped by the restitution
   coefficient C_REST.  Body-body collisions are not handled here. */
void collision_step(struct world *world) {
int a, b;
double r, x, y, vx, vy;
// Impose screen boundaries by reversing direction if body is off screen
for (a = 0; a < world->num_bodies; a++) {
r = world->bodies[a].r;
x = world->bodies[a].x;
y = world->bodies[a].y;
vx = world->bodies[a].vx;
vy = world->bodies[a].vy;
if (x-r < 0) { // left edge
if (vx < 0) { world->bodies[a].vx = -C_REST * vx; }
world->bodies[a].x = r; /* clamp center so the circle touches the edge */
} else if (x+r > WIDTH) { // right edge
if (vx > 0) { world->bodies[a].vx = -C_REST * vx; }
world->bodies[a].x = WIDTH - r;
}
if (y-r < 0) { // y == 0 edge (top of the window in X11's y-down coordinates)
if (vy < 0) { world->bodies[a].vy = -C_REST * vy; }
world->bodies[a].y = r;
} else if (y+r > HEIGHT) { // y == HEIGHT edge (bottom of the window)
if (vy > 0) { world->bodies[a].vy = -C_REST * vy; }
world->bodies[a].y = HEIGHT - r;
}
}
}
/* Advance all bodies by one explicit-Euler time step of size time_res:
   build a fresh Barnes-Hut quadtree over the current positions, sum the
   gravitational accelerations through it, then integrate velocities and
   positions.  The tree is rebuilt and destroyed every call.
   NOTE(review): the bounding box is seeded at (0,0), so it always contains
   the origin even if all bodies lie elsewhere — harmless but slightly
   loose.  The commented-out pragmas are a disabled OpenMP parallelization
   of the two loops. */
void position_step(struct world *world, double time_res){
struct node * rootnode;
//struct body * bodies = world->bodies;
//int nbodies = world->num_bodies;
double xmin, xmax, ymin, ymax;
xmin = 0.0;
xmax = 0.0;
ymin = 0.0;
ymax = 0.0;
for(int i = 0; i < world->num_bodies; i++) //reset accel
{
world->bodies[i].ax = 0.0;
world->bodies[i].ay = 0.0;
/* Grow the tree's bounding box to cover every body. */
xmin=min(xmin,world->bodies[i].x);
xmax=max(xmax,world->bodies[i].x);
ymin=min(ymin,world->bodies[i].y);
ymax=max(ymax,world->bodies[i].y);
}
/* Root starts as a leaf holding body 0; the rest are inserted below. */
rootnode = createnode(world->bodies+0,xmin,xmax,ymin,ymax);
//rootnode = createnode(bodies+0,0,WIDTH,0,HEIGHT);
for(int i = 1; i < world->num_bodies; i++)
{
insertbody(world->bodies+i, rootnode);
}
//#pragma omp parallel
{
//#pragma omp for
for(int i = 0; i < world->num_bodies; i++) //sum accel
{
treesum(rootnode, world->bodies+i, TREERATIO);
}
//#pragma omp for
for(int i = 0; i < world->num_bodies; i++)
{
//Update velocities
world->bodies[i].vx += world->bodies[i].ax * time_res;
world->bodies[i].vy += world->bodies[i].ay * time_res;
//Update positions
world->bodies[i].x += world->bodies[i].vx * time_res;
world->bodies[i].y += world->bodies[i].vy * time_res;
}
}
destroytree(rootnode);
}
/* Run one full simulation step: the gravity/position update (timed with
   times() and accumulated into the file-scope total_time counter, in
   clock ticks — total_time is defined outside this chunk) followed by the
   boundary-collision pass. */
void step_world(struct world *world, double time_res) {
struct tms ttt;
clock_t start, end;
start = times(&ttt);
position_step(world, time_res);
end = times(&ttt);
total_time += end - start; /* only the N-body math is timed, not collisions */
collision_step(world);
}
/* Main method runs initialize() and update() */
/* Entry point: builds the world (optionally sized by argv[1]), opens an
   Xlib window when NOT_RUN_ON_PI is set, runs iteration_times simulation
   steps (iteration_times is a global defined outside this chunk) while
   drawing each frame, then reports the accumulated position-calculation
   time in seconds. */
int main(int argc, char **argv) {
//total_time.tv_sec = 0;
//total_time.tv_usec = 0;
/* get num bodies from the command line */
int num_bodies;
num_bodies = (argc == 2) ? atoi(argv[1]) : DEF_NUM_BODIES;
printf("Universe has %d bodies.\n", num_bodies);
//omp_set_num_threads(8);
/* set up the universe */
time_t cur_time;
time(&cur_time);
srand48((long)cur_time); // seed the RNG used in create_world
struct world *world = create_world(num_bodies);
/* set up graphics using Xlib */
#if NOT_RUN_ON_PI
Display *disp = XOpenDisplay(NULL);
int scr = DefaultScreen(disp);
Window win = XCreateSimpleWindow(
disp,
RootWindow(disp, scr),
0, 0,
WIDTH, HEIGHT,
0,
BlackPixel(disp, scr), WhitePixel(disp, scr));
XStoreName(disp, win, "N-Body Simulator");
/* Double buffering: render into back_buf, then blit to the window. */
Pixmap back_buf = XCreatePixmap(disp, RootWindow(disp, scr),
WIDTH, HEIGHT, DefaultDepth(disp, scr));
GC gc = XCreateGC(disp, back_buf, 0, 0);
// Make sure we're only looking for messages about closing the window
Atom del_window = XInternAtom(disp, "WM_DELETE_WINDOW", 0);
XSetWMProtocols(disp, win, &del_window, 1);
XSelectInput(disp, win, StructureNotifyMask);
XMapWindow(disp, win);
XEvent event;
// wait until window is mapped
while (1) {
XNextEvent(disp, &event);
if (event.type == MapNotify) {
break;
}
}
#endif
/* delay/remaining are only used by the commented-out nanosleep below. */
struct timespec delay={0, 1000000000 / 60}; // for 60 FPS
struct timespec remaining;
double delta_t = 0.1;
int ii;
for(ii = 0; ii < iteration_times; ii++){
// check if the window has been closed
#if NOT_RUN_ON_PI
if (XCheckTypedEvent(disp, ClientMessage, &event)) {
break;
}
// we first draw to the back buffer then copy it to the front (`win`)
draw_world(disp, back_buf, gc, world);
XCopyArea(disp, back_buf, win, gc, 0, 0, WIDTH, HEIGHT, 0, 0);
#endif
step_world(world, delta_t);
//if you want to watch the process in 60 FPS
//nanosleep(&delay, &remaining);
}
// printf("Total Time = %f\n", (double)total_time.tv_sec + (double)total_time.tv_usec/1000000);
/* Convert accumulated clock ticks to seconds via the tick rate. */
printf("Nbody Position Calculation Time = :%lf s\n",(double)total_time / (sysconf(_SC_CLK_TCK)));
#if NOT_RUN_ON_PI
XFreeGC(disp, gc);
XFreePixmap(disp, back_buf);
XDestroyWindow(disp, win);
XCloseDisplay(disp);
#endif
return 0;
}
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number it
% represents, at most, the number of colors desired in the output image.
% Assignment defines the output image's color map and sets each pixel's
% color by restorage_class in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the storage_class phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define ErrorRelativeWeight PerceptibleReciprocal(16)
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
/* A pixel value carried at double precision per channel, used for exact
   error/color accumulation during quantization. */
typedef struct _DoublePixelPacket
{
double
red,
green,
blue,
alpha;
} DoublePixelPacket;
/* One node of the color-description tree.  child[16] allows a 4-bit
   branching factor: 3 bits from R/G/B plus 1 bit from alpha when alpha is
   associated (see ColorToNodeId). */
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique; /* pixels classified exactly at this node ("n2") */
DoublePixelPacket
total_color; /* channel sums for pixels classified here (Sr/Sg/Sb/Sa) */
double
quantize_error; /* accumulated distance-to-cell-center ("E") */
size_t
color_number, /* colormap index assigned during colormap definition */
id, /* this node's child index within its parent */
level; /* depth in the tree (root = 0) */
} NodeInfo;
/* A slab of NodeInfo structures; slabs are chained so nodes can be
   allocated in batches and freed all at once. */
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
/* All working state for one quantization run: the color tree, pruning
   thresholds, dithering error queue, and the current traversal target. */
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors, /* colors currently represented by the tree */
maximum_colors; /* requested maximum number of output colors */
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target; /* color being searched for by ClosestColor() */
double
distance, /* best distance found so far by ClosestColor() */
pruning_threshold,
next_threshold;
size_t
nodes, /* total nodes allocated */
free_nodes, /* nodes remaining in the current slab */
color_number; /* colormap index of the current best match */
NodeInfo
*next_node;
Nodes
*node_queue; /* head of the slab chain */
MemoryInfo
*memory_info;
ssize_t
*cache; /* dither cache: maps quantized colors to colormap indexes */
DoublePixelPacket
error[ErrorQueueLength]; /* recent dithering error terms */
double
diffusion,
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha; /* treat alpha as a 4th tree dimension? */
ssize_t
x,
y; /* current pixel position during dithering */
size_t
depth; /* maximum tree depth in use */
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *),
SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
QuantizeInfo
*quantize_info;
/* AcquireCriticalMemory() aborts on failure, so the result needs no
   NULL check; GetQuantizeInfo() fills in the defaults. */
quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
GetQuantizeInfo(quantize_info);
if (image_info != (ImageInfo *) NULL)
{
const char
*option;
/* Dither method defaults from the image_info->dither flag, but an
   explicit "dither" image option overrides it. */
quantize_info->dither_method=image_info->dither == MagickFalse ?
NoDitherMethod : RiemersmaDitherMethod;
option=GetImageOption(image_info,"dither");
if (option != (const char *) NULL)
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,option);
quantize_info->measure_error=image_info->verbose;
}
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/* Convert a raw Quantum pixel into a DoublePixelPacket, pre-multiplying the
   color channels by normalized alpha when alpha association is enabled and
   the pixel is not fully opaque; otherwise the channels pass through
   unchanged.  The alpha channel itself is never pre-multiplied. */
static inline void AssociateAlphaPixel(const Image *image,
const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
double
alpha;
if ((cube_info->associate_alpha == MagickFalse) ||
(GetPixelAlpha(image,pixel) == OpaqueAlpha))
{
/* No association needed: straight copy of all four channels. */
alpha_pixel->red=(double) GetPixelRed(image,pixel);
alpha_pixel->green=(double) GetPixelGreen(image,pixel);
alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
return;
}
/* Pre-multiply colors by alpha scaled into [0, 1]. */
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
alpha_pixel->red=alpha*GetPixelRed(image,pixel);
alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
/* PixelInfo variant of AssociateAlphaPixel(): convert a PixelInfo into a
   DoublePixelPacket, pre-multiplying the color channels by normalized
   alpha when alpha association is enabled and the pixel is not fully
   opaque.  The alpha channel itself is copied unmodified. */
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
if ((cube_info->associate_alpha == MagickFalse) ||
(pixel->alpha == OpaqueAlpha))
{
/* No association required: straight copy. */
alpha_pixel->red=(double) pixel->red;
alpha_pixel->green=(double) pixel->green;
alpha_pixel->blue=(double) pixel->blue;
alpha_pixel->alpha=(double) pixel->alpha;
}
else
{
double
weight;
/* Pre-multiply colors by alpha scaled into [0, 1]. */
weight=(double) (QuantumScale*pixel->alpha);
alpha_pixel->red=weight*pixel->red;
alpha_pixel->green=weight*pixel->green;
alpha_pixel->blue=weight*pixel->blue;
alpha_pixel->alpha=(double) pixel->alpha;
}
}
/* Compute the child index for descending one level of the color tree:
   take bit `index` of each 8-bit-scaled channel and pack them as
   red -> bit 0, green -> bit 1, blue -> bit 2, and (when alpha is
   associated) alpha -> bit 3.  Yields 0..7 without alpha, 0..15 with. */
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
return(id);
}
/* Assignment phase of quantization: build the image's colormap from the
   pruned color tree, then map every pixel to its closest colormap entry
   (either by dithering or by direct row-parallel classification), and
   finally special-case 2-color grayscale output into pure black/white. */
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
exception);
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
if (SetImageColormap(image,cube_info,exception) == MagickFalse)
return(MagickFalse);
/*
Create a reduced color image.
*/
if (cube_info->quantize_info->dither_method != NoDitherMethod)
(void) DitherImage(image,cube_info,exception);
else
{
CacheView
*image_view;
MagickBooleanType
status;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
/* Each thread works on a private copy of the cube so the traversal
   scratch fields (target, distance, color_number) don't race. */
CubeInfo
cube;
Quantum
*magick_restrict q;
ssize_t
count,
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
const NodeInfo
*node_info;
ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
/* Run-length shortcut: count consecutive identical pixels so the
   tree lookup is done once per run. */
for (count=1; (x+count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
/* Seed the search distance above the maximum possible squared
   distance so any real color beats it. */
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
/* Write the chosen colormap entry to every pixel in the run. */
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(
image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(
image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(
image->colormap[index].blue),q);
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(
image->colormap[index].alpha),q);
}
q+=GetPixelChannels(image);
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image,exception);
if ((cube_info->quantize_info->number_colors == 2) &&
(IsGrayColorspace(cube_info->quantize_info->colorspace)))
{
double
intensity;
/*
Monochrome image.
*/
/* Force the two colormap entries to opposite extremes, keeping the
   darker entry dark (ordering decided by luma). */
intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
QuantumRange;
if (image->colors > 1)
{
intensity=0.0;
if (GetPixelInfoLuma(image->colormap+0) >
GetPixelInfoLuma(image->colormap+1))
intensity=(double) QuantumRange;
}
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image,exception);
/* Restore the caller's original colorspace if we transformed above. */
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace,exception);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing It updates the following data for each such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
/* Decide whether alpha participates as a fourth dimension of the color
   tree: enabled when the image carries an alpha channel, except for
   2-color grayscale quantization where alpha is ignored. */
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
MagickBooleanType
use_alpha;
use_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
MagickFalse;
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
use_alpha=MagickFalse;
cube_info->associate_alpha=use_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  double
    bisect;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  /*
    Classification runs in the quantize colorspace; convert the image when it
    differs (or to sRGB when the request is undefined/CMYK and the image is
    not already sRGB-compatible).
  */
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  /*
    The descent starts from the center of the color cube; mid/bisect below
    track the center of the current node's subcube.
  */
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;  /* remains 0 when alpha is not associated */
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    First pass: classify pixels to the full MaxTreeDepth until the tree
    describes more than cube_info->maximum_colors colors.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        /* Run-length optimization: extend count over identical neighboring
           pixels so the whole run is classified in a single descent. */
        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        /* Move the subcube center toward the child selected by each id bit. */
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            /* Only a full-depth leaf counts as a distinct color. */
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        /* Weight the error by the run length. */
        node_info->quantize_error+=count*sqrt(distance);
        /* NOTE(review): this adds the node's accumulated error, not just
           this pixel's contribution, to the root -- confirm intended. */
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /*
          Too many colors: prune to the (reduced) cube depth and continue in
          the second pass below at that depth.
        */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: resumes at the row after the first pass stopped, but the
    descent goes only cube_info->depth levels deep.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        /* Run-length optimization, as in the first pass. */
        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            /* A leaf at the current (possibly pruned) depth is a color. */
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /*
    Undo the colorspace conversion performed above (conversion target is
    sRGB; the original colorspace is not recorded here).
  */
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  /* MagickFalse when classification stopped before the last row (pixel-cache
     failure or progress-monitor abort in the second pass). */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate a fresh structure initialized to defaults; with a NULL source
    the defaults themselves are the clone.
  */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      /*
        Copy the caller-visible settings from the source structure.
      */
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  double
    alpha,
    beta,
    delta,
    metric;

  DoublePixelPacket
    *magick_restrict target;

  PixelInfo
    *magick_restrict candidate;

  size_t
    number_children;

  ssize_t
    i;

  /*
    Depth-first: visit every allocated child before considering this node.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique == 0)
    return;
  /*
    Compare this node's colormap entry against the current best match: the
    squared distance is accumulated channel by channel, bailing out as soon
    as it exceeds the best distance found so far.
  */
  candidate=image->colormap+node_info->color_number;
  target=(&cube_info->target);
  alpha=1.0;
  beta=1.0;
  if (cube_info->associate_alpha != MagickFalse)
    {
      /* Alpha-weighted comparison when the cube tracks transparency. */
      alpha=(MagickRealType) (QuantumScale*candidate->alpha);
      beta=(MagickRealType) (QuantumScale*target->alpha);
    }
  delta=alpha*candidate->red-beta*target->red;
  metric=delta*delta;
  if (metric > cube_info->distance)
    return;
  delta=alpha*candidate->green-beta*target->green;
  metric+=delta*delta;
  if (metric > cube_info->distance)
    return;
  delta=alpha*candidate->blue-beta*target->blue;
  metric+=delta*delta;
  if (metric > cube_info->distance)
    return;
  if (cube_info->associate_alpha != MagickFalse)
    {
      delta=candidate->alpha-target->alpha;
      metric+=delta*delta;
    }
  if (metric <= cube_info->distance)
    {
      /* New best match: remember distance and colormap index. */
      cube_info->distance=metric;
      cube_info->color_number=node_info->color_number;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    settings;

  /*
    Re-quantize a palette image onto itself so duplicate and unused colormap
    entries are dropped.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);  /* nothing to compress: not a palette image */
  GetQuantizeInfo(&settings);
  settings.number_colors=image->colors;
  settings.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&settings,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha;

      PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);  /* 1/count, guarded against 0 */
      if (cube_info->associate_alpha == MagickFalse)
        {
          /* No alpha channel: mean RGB, entry is fully opaque. */
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          /* Mean alpha for this cube. */
          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /* assumes total_color holds alpha-associated (premultiplied)
                 sums -- gamma divides the mean alpha back out; TODO confirm
                 against AssociateAlphaPixel() */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /* Remember the most-populated non-opaque entry as the image's
                 transparent index. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      /* Record this node's colormap slot and advance the colormap. */
      node_info->color_number=image->colors++;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *head,
    *next;

  /*
    Release color cube tree storage: walk the node-queue list and free each
    block of tree nodes.  The queue is assumed non-empty (allocating the
    root always enqueues at least one block).
  */
  head=cube_info->node_queue;
  do
  {
    next=head->next;
    head->nodes=(NodeInfo *) RelinquishMagickMemory(head->nodes);
    head=(Nodes *) RelinquishMagickMemory(head);
    head=next;
  } while (head != (Nodes *) NULL);
  cube_info->node_queue=(Nodes *) NULL;
  /*
    Release the dither cache (when allocated), the quantize settings, and
    finally the cube itself.
  */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Validate the structure before doing any work: the asserts must precede
    the trace event (consistent with CompressImageColormap(), which checks
    its preconditions first).
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* Invalidate the signature so stale pointers are caught by the asserts
     above on a double destroy. */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color reduced algorithm to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  size_t
    number_threads;

  ssize_t
    i;

  assert(pixels != (DoublePixelPacket **) NULL);
  /*
    Hoist the loop-invariant thread-count query out of the loop; this is the
    same bound AcquirePixelThreadSet() used to size the allocation, queried
    once instead of on every iteration.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
size_t
number_threads;
ssize_t
i;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
/* Pack the top (8-CacheShift) bits of each 8-bit channel into its own
   bit-field of the cache index: red in the lowest field, then green, blue,
   and alpha. */
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  /*
    Quantize each channel to 8 bits and drop the CacheShift low bits, so
    similar colors map onto the same cache slot.
  */
  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  /* The alpha field participates only when the cube tracks alpha. */
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    Quantum
      *magick_restrict q;

    size_t
      index;

    ssize_t
      x,
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Work on a private copy of the cube so per-row search state (target,
      distance, color_number) is local; the cache pointer inside is shared.
    */
    cube=(*cube_info);
    /* Two row-sized error buffers are ping-ponged by row parity. */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    /* Serpentine scan: odd rows run right-to-left; v is the step sign. */
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      ssize_t
        i;

      ssize_t
        u;

      /* u is the actual column for this step of the serpentine scan. */
      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /*
        Add the diffused error from already-visited neighbors using the
        classic Floyd-Steinberg weights: 7/16 (left), 1/16 (upper-right),
        5/16 (above) and 3/16 (upper-left), scaled by the diffusion amount.
      */
      if (x > 0)
        {
          pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16;
          pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16;
          pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=cube_info->diffusion*previous[u+v].red/16;
              pixel.green+=cube_info->diffusion*previous[u+v].green/16;
              pixel.blue+=cube_info->diffusion*previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16;
            }
          pixel.red+=5.0*cube_info->diffusion*previous[u].red/16;
          pixel.green+=5.0*cube_info->diffusion*previous[u].green/16;
          pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16;
              pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16;
              pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16;
            }
        }
      /* Clamp the error-adjusted color back into the quantum range. */
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /* Check the color cache; a negative entry means not yet resolved. */
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /* NOTE(review): status records failures above but is not returned here;
     confirm whether an unconditional MagickTrue is intended. */
  return(MagickTrue);
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CubeInfo
    *p;

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  size_t
    index;

  /*
    Dither the pixel at the cube's current curve position (p->x,p->y) --
    when it lies inside the image -- then step the position one pixel in
    the given direction.  Called once per step of the Hilbert curve walk.
  */
  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      Quantum
        *magick_restrict q;

      ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /* Apply the queued error terms, weighted by the exponential-decay
         weights set up in GetCubeInfo(). */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].red;
        pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].green;
        pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
            p->error[i].alpha;
      }
      /* Clamp the error-adjusted color back into the quantum range. */
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /* Check the color cache; a negative entry means not yet resolved. */
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Advance the curve position one pixel in the requested direction. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
static MagickBooleanType Riemersma(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const size_t level,const unsigned int direction,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Recursively trace a Hilbert curve of order `level` over the image,
    dithering one pixel per step via RiemersmaDither().  The gravity value
    names the orientation of the current curve cell: at level 1 each case
    emits the cell's three connecting moves; at higher levels it emits the
    four rotated sub-cells (at level-1) interleaved with those moves.  Any
    step that fails aborts the remainder of the traversal.
  */
  status=MagickTrue;
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        break;
      }
      default:
        break;
    }
  else
    /* Recursive case: four sub-cells joined by three single-pixel moves. */
    switch (direction)
    {
      case WestGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        break;
      }
      default:
        break;
    }
  return(status);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *view;

  const char
    *option;

  MagickBooleanType
    status;

  size_t
    edge,
    order;

  /*
    Honor a user-supplied error-diffusion amount, if one was set.
  */
  option=GetImageArtifact(image,"dither:diffusion-amount");
  if (option != (const char *) NULL)
    cube_info->diffusion=StringToDoubleInterval(option,1.0);
  /*
    Everything other than Riemersma dithering is handled by Floyd-Steinberg.
  */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve: reset the error
    queue and walk state, then size the curve order so its square covers
    the whole image.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  edge=MagickMax(image->columns,image->rows);
  order=(size_t) log2((double) edge);
  if (((size_t) 1UL << order) < edge)
    order++;  /* round the curve order up to cover the whole image */
  view=AcquireAuthenticCacheView(image,exception);
  status=MagickTrue;
  if (order > 0)
    status=Riemersma(image,view,cube_info,order,NorthGravity,exception);
  if (status != MagickFalse)
    status=RiemersmaDither(image,view,cube_info,ForgetGravity,exception);
  view=DestroyCacheView(view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initialize the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose a optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
weight;
size_t
length;
ssize_t
i;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]=PerceptibleReciprocal(weight);
weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
}
cube_info->diffusion=1.0;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to the newly allocated
%      node.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;
      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Fix: release the queue header so it is not leaked when the node
            array allocation fails.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /*
    Carve the next node out of the current queue block and zero it.
  */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;
  ssize_t
    index,
    y;
  /*
    Measure the difference between the original and quantized image: walk
    every pixel, compare it against its colormap entry, and accumulate
    per-channel absolute and squared distances into image->error.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /* DirectClass images have no colormap, so there is no quantization error. */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* 3 channels per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /*
            Weight each color by its opacity so transparent pixels
            contribute less to the reported error.
          */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Normalize the accumulated sums; area counts channel samples, not pixels. */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Initialize the QuantizeInfo structure to its defaults: 256 colors,
    Riemersma dithering, undefined colorspace, no error measurement.
    Validate the argument before doing any work (the original logged first).
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K m e a n s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KmeansImage() applies k-means color reduction to an image. This is a
% colorspace clustering or segmentation technique.
%
% The format of the KmeansImage method is:
%
% MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
% const size_t max_iterations,const double tolerance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_colors: number of colors to use as seeds.
%
% o max_iterations: maximum number of iterations while converging.
%
% o tolerance: the maximum tolerance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-thread, per-cluster accumulator for one k-means iteration.
*/
typedef struct _KmeansInfo
{
  double
    red,         /* running channel sums (QuantumScale-normalized) */
    green,
    blue,
    alpha,       /* accumulated only when the image has an alpha trait */
    black,       /* accumulated only for CMYK colorspace */
    count,       /* number of pixels assigned to this cluster */
    distortion;  /* sum of minimum squared distances for assigned pixels */
} KmeansInfo;
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  ssize_t
    n;
  /*
    Free each thread's cluster accumulator, then the table of pointers.
    Entries may be NULL when AcquireKmeansThreadSet() failed part-way.
  */
  assert(kmeans_info != (KmeansInfo **) NULL);
  n=0;
  while (n < (ssize_t) GetMagickResourceLimit(ThreadResource))
  {
    if (kmeans_info[n] != (KmeansInfo *) NULL)
      kmeans_info[n]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[n]);
    n++;
  }
  return((KmeansInfo **) RelinquishMagickMemory(kmeans_info));
}
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
KmeansInfo
**kmeans_info;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
sizeof(*kmeans_info));
if (kmeans_info == (KmeansInfo **) NULL)
return((KmeansInfo **) NULL);
(void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
sizeof(**kmeans_info));
if (kmeans_info[i] == (KmeansInfo *) NULL)
return(DestroyKmeansThreadSet(kmeans_info));
}
return(kmeans_info);
}
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  double
    gamma,
    metric,
    pixel;
  /*
    Squared color distance between pixel p and cluster mean q.  The RGB (and
    black) contributions are attenuated by gamma, which shrinks toward zero
    as either color becomes transparent (or, for CMYK, as black dominates).
  */
  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /* A missing alpha trait on either side is treated as fully opaque. */
      pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ?
        q->alpha : OpaqueAlpha);
      metric+=pixel*pixel;
      if (image->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  /*
    NOTE(review): this triples the alpha/black terms accumulated so far,
    before the RGB terms are added — presumably to balance the three RGB
    channels that follow; confirm the intended weighting.
  */
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /*
        Hue is circular; fold distances greater than a half turn.
        NOTE(review): for pixel < -0.5 the subtraction moves the value
        further from zero rather than wrapping it — verify this is the
        intended wrap-around behavior.
      */
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}
MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag "Kmeans/Image"
#define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info))
  CacheView
    *image_view;
  const char
    *colors;
  double
    previous_tolerance;
  KmeansInfo
    **kmeans_pixels;
  MagickBooleanType
    verbose,
    status;
  ssize_t
    n;
  size_t
    number_threads;
  /*
    K-means color reduction: seed a colormap (by quantization, an explicit
    color list, or random colors), then iteratively assign pixels to their
    nearest cluster and recompute the cluster means until the change in
    total distortion falls within tolerance or max_iterations is reached.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;
      QuantizeInfo
        *quantize_info;
      size_t
        depth;
      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /* Tree depth is Log4(number_colors)+2; see QuantizeImage(). */
      n=number_colors;
      for (depth=1; n != 0; depth++)
        n>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];
      const char
        *p;
      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        const char
          *q;
        /* Scan to the next ';' separator (or end of string). */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;
          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            if (image->alpha_trait != UndefinedPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;
    ssize_t
      j,
      y;
    /* Reset every thread's per-cluster accumulators for this iteration. */
    for (j=0; j < (ssize_t) number_threads; j++)
      (void) memset(kmeans_pixels[j],0,image->colors*sizeof(*kmeans_pixels[j]));
    /*
      Assignment step: each row is processed in parallel; threads accumulate
      into their own kmeans_pixels[id] slot to avoid contention.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic) shared(status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();
      Quantum
        *magick_restrict q;
      ssize_t
        x;
      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;
        ssize_t
          i,
          k;
        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        k=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;
          /* Early exit: can't do better than a (near) exact match. */
          if (min_distance <= MagickEpsilon)
            break;
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              k=i;
            }
        }
        kmeans_pixels[id][k].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][k].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][k].blue+=QuantumScale*GetPixelBlue(image,q);
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[id][k].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][k].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][k].count++;
        kmeans_pixels[id][k].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) k,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (j=1; j < (ssize_t) number_threads; j++)
    {
      ssize_t
        k;
      for (k=0; k < (ssize_t) image->colors; k++)
      {
        kmeans_pixels[0][k].red+=kmeans_pixels[j][k].red;
        kmeans_pixels[0][k].green+=kmeans_pixels[j][k].green;
        kmeans_pixels[0][k].blue+=kmeans_pixels[j][k].blue;
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[0][k].alpha+=kmeans_pixels[j][k].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][k].black+=kmeans_pixels[j][k].black;
        kmeans_pixels[0][k].count+=kmeans_pixels[j][k].count;
        kmeans_pixels[0][k].distortion+=kmeans_pixels[j][k].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (j=0; j < (ssize_t) image->colors; j++)
    {
      double
        gamma;
      /* gamma = 1/count (0 stays 0 for empty clusters). */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][j].count);
      image->colormap[j].red=gamma*QuantumRange*kmeans_pixels[0][j].red;
      image->colormap[j].green=gamma*QuantumRange*kmeans_pixels[0][j].green;
      image->colormap[j].blue=gamma*QuantumRange*kmeans_pixels[0][j].blue;
      if (image->alpha_trait != UndefinedPixelTrait)
        image->colormap[j].alpha=gamma*QuantumRange*kmeans_pixels[0][j].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[j].black=gamma*QuantumRange*kmeans_pixels[0][j].black;
      distortion+=kmeans_pixels[0][j].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    /* Converged when the distortion change drops within tolerance. */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  /* Push the updated colormap indexes back into the pixel values. */
  return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  QuantizeInfo
    *quantize_info;
  ssize_t
    i;
  ssize_t
    y;
  /*
    Quantize each channel to `levels` evenly spaced values, then reduce the
    image to at most levels^3 colors with QuantizeImage().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    For colormapped images, posterize the colormap entries too.  Note the
    parallel for loop below is the (brace-less) body of this if statement.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Only channels flagged for update are posterized. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* Increment is atomic; the subsequent read is a best-effort value. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Reduce to at most levels^3 colors (capped at the colormap limit).
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
%  The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;
  ssize_t
    i,
    number_children;
  /*
    Recursively prune all descendants first, then — while the tree still has
    more nodes than the color budget — fold this node into its parent.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8L : 16L;
  for (i=0; i < number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  if (cube_info->nodes <= cube_info->maximum_colors)
    return;
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  ssize_t
    i,
    number_children;
  /*
    Depth-first traversal; prune only the nodes sitting at the tree's
    maximum depth, merging their statistics into their parents.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8L : 16L;
  for (i=0; i < number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[i]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  ssize_t
    i,
    number_children;
  /*
    Walk the subtree depth-first and prune any node deeper than the cube's
    configured depth, folding its statistics into its parent.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8L : 16L;
  for (i=0; i < number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[i]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;
  ImageType
    type;
  MagickBooleanType
    status;
  size_t
    depth,
    maximum_colors;
  /*
    Choose a fixed number of representative colors for the image: classify
    every pixel into a color cube, prune the cube down to the color budget,
    then map pixels onto the resulting colormap.
  */
  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the requested colormap size (0 means "use the maximum").
  */
  maximum_colors=quantize_info->number_colors;
  if ((maximum_colors == 0) || (maximum_colors > MaxColormapSize))
    maximum_colors=MaxColormapSize;
  type=IdentifyImageGray(image,exception);
  if (IsGrayImageType(type) != MagickFalse)
    (void) SetGrayscaleImage(image,exception);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        remaining;
      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      depth=1;
      for (remaining=maximum_colors; remaining != 0; remaining>>=2)
        depth++;
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5))
        depth--;
      if (IsGrayImageType(type) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;
  Image
    *image;
  MagickBooleanType
    proceed,
    status;
  MagickProgressMonitor
    progress_monitor;
  size_t
    depth,
    maximum_colors,
    number_images;
  ssize_t
    i;
  /*
    Quantize a whole image sequence against one shared color cube so every
    frame maps onto the same colormap.
  */
  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;
      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /*
        Consistency with QuantizeImage(): only trade depth for dithering
        when the tree is deeper than the minimum (GetCubeInfo clamps depth
        to at least 2 anyway, so the result is unchanged for small trees).
      */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  /*
    Classification pass: accumulate the colors of every frame into the
    shared cube (per-image progress reporting suspended).
  */
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  size_t
    count;

  ssize_t
    child,
    number_children;

  /*
    Record this node's quantization error, then recurse depth-first into
    each child, packing the errors contiguously starting at offset.
    Returns the number of entries written.
  */
  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  count=1;
  number_children=(cube_info->associate_alpha == MagickFalse) ? 8 : 16;
  for (child=0; child < number_children; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      count+=QuantizeErrorFlatten(cube_info,node_info->child[child],
        offset+count,quantize_error);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  ssize_t
    child,
    number_children;

  /*
    Depth-first traversal: prune children before examining this node so the
    tree is pruned from the leaves upward.
  */
  number_children=(cube_info->associate_alpha == MagickFalse) ? 8 : 16;
  for (child=0; child < number_children; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[child]);
  if (node_info->quantize_error > cube_info->pruning_threshold)
    {
      /*
        Node survives this pass: count it if it uniquely defines a color and
        track the smallest surviving error as the next pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
      return;
    }
  PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  double
    p,
    q;

  /*
    qsort() comparator: sort quantization errors in ascending order.  Values
    within MagickEpsilon of each other compare equal.

    The near-equality test must come FIRST: the original ordering
    (*p > *q, then the epsilon test) was asymmetric -- for a pair differing
    by less than MagickEpsilon, compare(a,b) could return 1 while
    compare(b,a) returned 0, violating the consistent-ordering contract
    qsort() requires of its comparator.
  */
  p=(*(const double *) error_p);
  q=(*(const double *) error_q);
  if (fabs(q-p) <= MagickEpsilon)
    return(0);
  return(p > q ? 1 : -1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
/*
  Repeatedly prune the color cube until the number of color-defining nodes
  does not exceed cube_info->maximum_colors (algorithm described in the
  method header comment above).
*/
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
double
*quantize_error;
/*
Enable rapid reduction of the number of unique colors: flatten the
per-node errors into a sorted array and jump the initial pruning
threshold close to the target color count (110% headroom).  Allocation
failure here is non-fatal -- the loop below still converges, just more
slowly from a zero threshold.
*/
quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (double *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(double),
QuantizeErrorCompare);
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(double *) RelinquishMagickMemory(quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
/*
  Prune every node whose error is at or below the current threshold;
  Reduce() recounts cube_info->colors and records the minimum surviving
  error as the next threshold.
*/
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Replace the colors of image with the closest colors drawn from
    remap_image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Build the color cube from the reference image; on failure, release the
    cube and report.
  */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status == MagickFalse)
    {
      DestroyCubeInfo(cube_info);
      return(MagickFalse);
    }
  cube_info->quantize_info->number_colors=cube_info->colors;
  status=AssignImageColors(image,cube_info,exception);
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Replace the colors of every frame in the sequence with the closest
    colors from the reference image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        No reference image: build a single global colormap for the sequence.
      */
      return(QuantizeImages(quantize_info,images,exception));
    }
  /*
    Classify image colors from the reference image, then remap each frame.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      while (image != (Image *) NULL)
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  double
    delta;

  /*
    qsort() comparator: order colormap entries by ascending pixel intensity.
    The intensity difference is clamped to the int range before truncation.
  */
  delta=GetPixelInfoIntensity((const Image *) NULL,(PixelInfo *) x)-
    GetPixelInfoIntensity((const Image *) NULL,(PixelInfo *) y);
  if (delta < (double) INT_MIN)
    delta=(double) INT_MIN;
  else
    if (delta > (double) INT_MAX)
      delta=(double) INT_MAX;
  return((int) delta);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
*colormap;
size_t
extent;
ssize_t
*colormap_index,
i,
j,
y;
/*
  Convert image to a PseudoClass grayscale image: build a colormap of the
  unique gray levels, sort it by intensity, collapse duplicate entries, and
  rewrite the pixel indexes to reference the compacted map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace,exception);
/* One index slot per possible quantum-map intensity (and per existing
   color), whichever is larger. */
extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
/*
  DirectClass source: discover unique gray levels on the fly.  An entry of
  -1 marks an intensity not yet assigned a colormap slot.
*/
(void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(image,q));
/* Double-checked test: re-check inside the critical section so only
   one thread allocates the colormap entry for this intensity. */
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=(double)
GetPixelRed(image,q);
image->colormap[image->colors].green=(double)
GetPixelGreen(image,q);
image->colormap[image->colors].blue=(double)
GetPixelBlue(image,q);
image->colors++;
}
}
SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
/* Stash each entry's original position in its alpha field so pixel indexes
   can be remapped after the intensity sort below. */
(void) memset(colormap_index,0,extent*sizeof(*colormap_index));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].alpha=(double) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
IntensityCompare);
colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
if (colormap == (PixelInfo *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/* Collapse runs of equivalent entries in the sorted colormap; record the
   old-index -> new-index mapping in colormap_index. */
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].alpha]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
/* Rewrite every pixel's index to reference the compacted colormap. */
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
GetPixelIndex(image,q))],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
/* Downgrade to bilevel if only two gray levels remain. */
if (SetImageMonochrome(image,exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColormap() traverses the color cube tree and sets the colormap of
% the image. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the SetImageColormap method is:
%
% MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
% ExceptionInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    capacity;

  /*
    Allocate a colormap large enough for the cube's colors, populate it from
    the color cube tree, then trim any unused tail entries.
  */
  capacity=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,capacity,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors == capacity)
    return(MagickTrue);
  image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
    image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  return(MagickTrue);
}
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/opencl.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% to try to set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image)
% MagickBooleanType AutoGammaImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set all given channels is adjusted in the same way using the
% mean average of those channels.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: auto-gamma the default channels.
  */
  status=AutoGammaImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
const ChannelType channel)
{
double
gamma,
mean,
logmean,
sans;
MagickStatusType
status;
logmean=log(0.5);
if ((channel & SyncChannels) != 0)
{
/*
Apply gamma correction equally accross all given channels
*/
(void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
gamma=log(mean*QuantumScale)/logmean;
return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma));
}
/*
Auto-gamma each channel separateally
*/
status = MagickTrue;
if ((channel & RedChannel) != 0)
{
(void) GetImageChannelMean(image,RedChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange,
gamma);
}
if ((channel & GreenChannel) != 0)
{
(void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange,
gamma);
}
if ((channel & BlueChannel) != 0)
{
(void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange,
gamma);
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
(void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange,
gamma);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
(void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange,
gamma);
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image)
% MagickBooleanType AutoLevelImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set the min/max/mean value of all given channels is used for
% all given channels, to all channels in the same way.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: auto-level the default channels.
  */
  status=AutoLevelImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  MagickBooleanType
    status;

  /*
    Auto-level is a min/max histogram stretch with no black- or white-point
    clipping.
  */
  status=MinMaxStretchImage(image,channel,0.0,0.0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast)
% MagickBooleanType BrightnessContrastImageChannel(Image *image,
% const ChannelType channel,const double brightness,
% const double contrast)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  /*
    Convenience wrapper: apply brightness/contrast to the default channels.
  */
  return(BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast));
}
MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
#define BrightnessContastImageTag "BrightnessContast/Image"

  double
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Convert the brightness/contrast percentages (-100 .. 100) into the slope
    and intercept of a linear transfer function, then apply it as a
    degree-1 polynomial to the requested channels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
const char *color_correction_collection)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"
/* Per-channel slope/offset/power of an ASC CDL correction. */
typedef struct _Correction
{
double
slope,
offset,
power;
} Correction;
/* Full CDL correction: RGB SOP values plus a saturation factor. */
typedef struct _ColorCorrection
{
Correction
red,
green,
blue;
double
saturation;
} ColorCorrection;
CacheView
*image_view;
char
token[MaxTextExtent];
ColorCorrection
color_correction;
const char
*content,
*p;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelPacket
*cdl_map;
register ssize_t
i;
ssize_t
y;
XMLTreeInfo
*cc,
*ccc,
*sat,
*sop;
/*
Allocate and initialize cdl maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (color_correction_collection == (const char *) NULL)
return(MagickFalse);
/* Parse the CCC XML; fail if it is malformed or lacks a ColorCorrection
   element. */
ccc=NewXMLTree((const char *) color_correction_collection,&image->exception);
if (ccc == (XMLTreeInfo *) NULL)
return(MagickFalse);
cc=GetXMLTreeChild(ccc,"ColorCorrection");
if (cc == (XMLTreeInfo *) NULL)
{
ccc=DestroyXMLTree(ccc);
return(MagickFalse);
}
/* Defaults: identity SOP (slope=1, offset=0, power=1), saturation=0. */
color_correction.red.slope=1.0;
color_correction.red.offset=0.0;
color_correction.red.power=1.0;
color_correction.green.slope=1.0;
color_correction.green.offset=0.0;
color_correction.green.power=1.0;
color_correction.blue.slope=1.0;
color_correction.blue.offset=0.0;
color_correction.blue.power=1.0;
color_correction.saturation=0.0;
/* Each SOP element holds up to three values in red, green, blue order;
   commas between values are tolerated. */
sop=GetXMLTreeChild(cc,"SOPNode");
if (sop != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*offset,
*power,
*slope;
slope=GetXMLTreeChild(sop,"Slope");
if (slope != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(slope);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
switch (i)
{
case 0:
{
color_correction.red.slope=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.slope=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.slope=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
offset=GetXMLTreeChild(sop,"Offset");
if (offset != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(offset);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
switch (i)
{
case 0:
{
color_correction.red.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 1:
{
color_correction.green.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.offset=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
power=GetXMLTreeChild(sop,"Power");
if (power != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(power);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
switch (i)
{
case 0:
{
color_correction.red.power=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.power=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.power=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
}
/* The SAT node carries a single saturation value. */
sat=GetXMLTreeChild(cc,"SATNode");
if (sat != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*saturation;
saturation=GetXMLTreeChild(sat,"Saturation");
if (saturation != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(saturation);
p=(const char *) content;
GetMagickToken(p,&p,token);
color_correction.saturation=StringToDouble(token,(char **) NULL);
}
}
ccc=DestroyXMLTree(ccc);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  Color Correction Collection:");
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.red.slope: %g",color_correction.red.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.red.offset: %g",color_correction.red.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.red.power: %g",color_correction.red.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.green.slope: %g",color_correction.green.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.green.offset: %g",color_correction.green.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.green.power: %g",color_correction.green.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.blue.slope: %g",color_correction.blue.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.blue.offset: %g",color_correction.blue.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.blue.power: %g",color_correction.blue.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.saturation: %g",color_correction.saturation);
}
/* Precompute a lookup table mapping each of the MaxMap+1 quantum levels
   through the slope/offset/power transfer function per channel. */
cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
if (cdl_map == (PixelPacket *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
color_correction.red.offset,color_correction.red.power)))));
cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
color_correction.green.offset,color_correction.green.power)))));
cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
color_correction.blue.offset,color_correction.blue.power)))));
}
if (image->storage_class == PseudoClass)
{
/*
Apply transfer function to colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
double
luma;
luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+
0.072186*image->colormap[i].blue;
image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation*
cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma);
image->colormap[i].green=ClampToQuantum(luma+
color_correction.saturation*cdl_map[ScaleQuantumToMap(
image->colormap[i].green)].green-luma);
image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation*
cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma);
}
}
/*
Apply transfer function to image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
luma;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Blend each channel's CDL-mapped value with the pixel's luma,
   weighted by the saturation factor. */
luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+
0.072186*GetPixelBlue(q);
SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma)));
SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma)));
SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will affect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image)
% MagickBooleanType ClutImageChannel(Image *image,
% const ChannelType channel,Image *clut_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: apply the color lookup table across the default
    channel set.
  */
  status=ClutImageChannel(image,DefaultChannels,clut_image);
  return(status);
}
MagickExport MagickBooleanType ClutImageChannel(Image *image,
const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag "Clut/Image"
/*
  Replace each selected channel value with a lookup into clut_map, a
  (MaxMap+1)-entry table sampled along the diagonal of clut_image.
*/
CacheView
*clut_view,
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
*clut_map;
register ssize_t
i;
ssize_t
adjust,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clut_image != (Image *) NULL);
assert(clut_image->signature == MagickSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/* A colored CLUT applied to a gray image forces the image to sRGB. */
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsGrayColorspace(clut_image->colorspace) == MagickFalse))
(void) SetImageColorspace(image,sRGBColorspace);
clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*clut_map));
if (clut_map == (MagickPixelPacket *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Clut image.
*/
status=MagickTrue;
progress=0;
/*
  adjust is 0 for integer interpolation, 1 otherwise, so the diagonal
  sample positions stay inside the CLUT image when interpolating.
*/
adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
exception=(&image->exception);
clut_view=AcquireAuthenticCacheView(clut_image,exception);
/* Sample the CLUT image along its diagonal into the lookup table. */
for (i=0; i <= (ssize_t) MaxMap; i++)
{
GetMagickPixelPacket(clut_image,clut_map+i);
(void) InterpolateMagickPixelPacket(clut_image,clut_view,
UndefinedInterpolatePixel,QuantumScale*i*(clut_image->columns-adjust),
QuantumScale*i*(clut_image->rows-adjust),clut_map+i,exception);
}
clut_view=DestroyCacheView(clut_view);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
GetMagickPixelPacket(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Capture the source pixel before any channel is overwritten. */
SetMagickPixelPacket(image,q,indexes+x,&pixel);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampPixelRed(clut_map+
ScaleQuantumToMap(GetPixelRed(q))));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampPixelGreen(clut_map+
ScaleQuantumToMap(GetPixelGreen(q))));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampPixelBlue(clut_map+
ScaleQuantumToMap(GetPixelBlue(q))));
if ((channel & OpacityChannel) != 0)
{
/*
  Three matte cases: CLUT without matte adjusts alpha from the CLUT
  intensity; image without matte derives opacity from the source
  pixel's intensity; otherwise index directly by existing opacity.
*/
if (clut_image->matte == MagickFalse)
SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+
ScaleQuantumToMap((Quantum) GetPixelAlpha(q))));
else
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampPixelOpacity(clut_map+
ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel))));
else
SetPixelOpacity(q,ClampPixelOpacity(
clut_map+ScaleQuantumToMap(GetPixelOpacity(q))));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t)
GetPixelIndex(indexes+x))->index));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ClutImageChannel)
#endif
proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
/* Enable matte on the target when the CLUT carried transparency. */
if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
(void) SetImageAlphaChannel(image,ActivateAlphaChannel);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
*/
static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    delta,
    hue,
    saturation;

  /*
    Enhance (sign > 0) or reduce (sign < 0) contrast by nudging brightness
    along a sinusoidal transfer curve in HSB space; hue and saturation are
    left untouched.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  delta=0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-brightness;
  brightness+=0.5*sign*delta;
  if (brightness > 1.0)
    brightness=1.0;
  if (brightness < 0.0)
    brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
const MagickBooleanType sharpen)
{
#define ContrastImageTag "Contrast/Image"
/*
  Apply the sinusoidal Contrast() transfer function to every pixel;
  sharpen selects whether contrast is increased or decreased.
*/
CacheView
*image_view;
ExceptionInfo
*exception;
int
sign;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
sign=sharpen != MagickFalse ? 1 : -1;
if (image->storage_class == PseudoClass)
{
/*
Contrast enhance colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
&image->colormap[i].blue);
}
/*
Contrast enhance image.
*/
/* Try the OpenCL-accelerated path first; fall through to CPU on failure. */
status = AccelerateContrastImage(image, sharpen, &image->exception);
if (status != MagickFalse)
return status;
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
blue,
green,
red;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=GetPixelRed(q);
green=GetPixelGreen(q);
blue=GetPixelBlue(q);
Contrast(sign,&red,&green,&blue);
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ContrastImage)
#endif
proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by `stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% `enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const char *levels)
% MagickBooleanType ContrastStretchImageChannel(Image *image,
% const size_t channel,const double black_point,
% const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const char *levels)
{
  double
    black_point,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse the black/white stretch points from the levels string (e.g. "1%",
    "10x90%").  The resulting black_point/white_point are cumulative pixel
    counts consumed by ContrastStretchImageChannel().
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) image->columns*image->rows;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  if ((flags & PercentValue) != 0)
    {
      /*
        Percentages are fractions of the total pixel count, not of
        QuantumRange: the thresholds are compared against cumulative
        histogram pixel counts in ContrastStretchImageChannel().
      */
      black_point*=(double) image->columns*image->rows/100.0;
      white_point*=(double) image->columns*image->rows/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) image->columns*image->rows-black_point;
  status=ContrastStretchImageChannel(image,DefaultChannels,black_point,
    white_point);
  return(status);
}
MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image,
const ChannelType channel,const double black_point,const double white_point)
{
#define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"
/*
  Linearly stretch each selected channel so that black_point/white_point
  (cumulative histogram pixel counts) map to the full quantum range.
*/
CacheView
*image_view;
double
intensity;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
black,
*histogram,
white;
QuantumPixelPacket
*stretch_map;
register ssize_t
i;
ssize_t
y;
/*
Allocate histogram and stretch map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Call OpenCL version */
status = AccelerateContrastStretchImageChannel(image, channel, black_point, white_point, &image->exception);
if (status == MagickTrue)
return status;
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*histogram));
stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*stretch_map));
if ((histogram == (MagickPixelPacket *) NULL) ||
(stretch_map == (QuantumPixelPacket *) NULL))
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Form histogram.
*/
exception=(&image->exception);
if (IsGrayImage(image,exception) != MagickFalse)
(void) SetImageColorspace(image,GRAYColorspace);
status=MagickTrue;
(void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
/* This view is reused below for the stretch pass, then destroyed there. */
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
/* SyncChannels: one intensity histogram mirrored into every channel. */
if ((channel & SyncChannels) != 0)
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
intensity;
/* NOTE(review): this Quantum shadows the function-scope double. */
intensity=ClampToQuantum(GetPixelIntensity(image,p));
histogram[ScaleQuantumToMap(intensity)].red++;
histogram[ScaleQuantumToMap(intensity)].green++;
histogram[ScaleQuantumToMap(intensity)].blue++;
histogram[ScaleQuantumToMap(intensity)].index++;
p++;
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
if ((channel & GreenChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
if ((channel & BlueChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
if ((channel & OpacityChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
p++;
}
}
/*
Find the histogram boundaries by locating the black/white levels.
*/
/*
  For each channel: scan up from bin 0 until the cumulative count exceeds
  black_point, then scan down from MaxMap until it exceeds the white-side
  threshold.  NOTE(review): the descending scans stop before bin 0.
*/
black.red=0.0;
white.red=MaxRange(QuantumRange);
if ((channel & RedChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].red;
if (intensity > black_point)
break;
}
black.red=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].red;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.red=(MagickRealType) i;
}
black.green=0.0;
white.green=MaxRange(QuantumRange);
if ((channel & GreenChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].green;
if (intensity > black_point)
break;
}
black.green=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].green;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.green=(MagickRealType) i;
}
black.blue=0.0;
white.blue=MaxRange(QuantumRange);
if ((channel & BlueChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].blue;
if (intensity > black_point)
break;
}
black.blue=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].blue;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.blue=(MagickRealType) i;
}
black.opacity=0.0;
white.opacity=MaxRange(QuantumRange);
if ((channel & OpacityChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].opacity;
if (intensity > black_point)
break;
}
black.opacity=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].opacity;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.opacity=(MagickRealType) i;
}
black.index=0.0;
white.index=MaxRange(QuantumRange);
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].index;
if (intensity > black_point)
break;
}
black.index=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].index;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.index=(MagickRealType) i;
}
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
/*
Stretch the histogram to create the stretched image mapping.
*/
/* Below black -> 0, above white -> QuantumRange, linear ramp in between. */
(void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & RedChannel) != 0)
{
if (i < (ssize_t) black.red)
stretch_map[i].red=(Quantum) 0;
else
if (i > (ssize_t) white.red)
stretch_map[i].red=QuantumRange;
else
if (black.red != white.red)
stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.red)/(white.red-black.red)));
}
if ((channel & GreenChannel) != 0)
{
if (i < (ssize_t) black.green)
stretch_map[i].green=0;
else
if (i > (ssize_t) white.green)
stretch_map[i].green=QuantumRange;
else
if (black.green != white.green)
stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.green)/(white.green-black.green)));
}
if ((channel & BlueChannel) != 0)
{
if (i < (ssize_t) black.blue)
stretch_map[i].blue=0;
else
if (i > (ssize_t) white.blue)
stretch_map[i].blue= QuantumRange;
else
if (black.blue != white.blue)
stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.blue)/(white.blue-black.blue)));
}
if ((channel & OpacityChannel) != 0)
{
if (i < (ssize_t) black.opacity)
stretch_map[i].opacity=0;
else
if (i > (ssize_t) white.opacity)
stretch_map[i].opacity=QuantumRange;
else
if (black.opacity != white.opacity)
stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.opacity)/(white.opacity-black.opacity)));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if (i < (ssize_t) black.index)
stretch_map[i].index=0;
else
if (i > (ssize_t) white.index)
stretch_map[i].index=QuantumRange;
else
if (black.index != white.index)
stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.index)/(white.index-black.index)));
}
}
/*
Stretch the image.
*/
/* Opacity/index stretching requires per-pixel storage (DirectClass). */
if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)))
image->storage_class=DirectClass;
if (image->storage_class == PseudoClass)
{
/*
Stretch colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
{
if (black.red != white.red)
image->colormap[i].red=stretch_map[
ScaleQuantumToMap(image->colormap[i].red)].red;
}
if ((channel & GreenChannel) != 0)
{
if (black.green != white.green)
image->colormap[i].green=stretch_map[
ScaleQuantumToMap(image->colormap[i].green)].green;
}
if ((channel & BlueChannel) != 0)
{
if (black.blue != white.blue)
image->colormap[i].blue=stretch_map[
ScaleQuantumToMap(image->colormap[i].blue)].blue;
}
if ((channel & OpacityChannel) != 0)
{
if (black.opacity != white.opacity)
image->colormap[i].opacity=stretch_map[
ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
}
}
}
/*
Stretch image.
*/
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
if (black.red != white.red)
SetPixelRed(q,stretch_map[
ScaleQuantumToMap(GetPixelRed(q))].red);
}
if ((channel & GreenChannel) != 0)
{
if (black.green != white.green)
SetPixelGreen(q,stretch_map[
ScaleQuantumToMap(GetPixelGreen(q))].green);
}
if ((channel & BlueChannel) != 0)
{
if (black.blue != white.blue)
SetPixelBlue(q,stretch_map[
ScaleQuantumToMap(GetPixelBlue(q))].blue);
}
if ((channel & OpacityChannel) != 0)
{
if (black.opacity != white.opacity)
SetPixelOpacity(q,stretch_map[
ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if (black.index != white.index)
SetPixelIndex(indexes+x,stretch_map[
ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ContrastStretchImageChannel)
#endif
proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
/*
  Enhance(weight): if pixel *r is within a color-distance threshold of the
  center 'pixel', fold it into 'aggregate' with the given weight and bump
  total_weight; always advances r to the next pixel.  (No comments inside
  the macro: line-splicing precedes comment removal.)
*/
#define Enhance(weight) \
mean=((MagickRealType) GetPixelRed(r)+pixel.red)/2; \
distance=(MagickRealType) GetPixelRed(r)-(MagickRealType) pixel.red; \
distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \
mean)*distance*distance; \
mean=((MagickRealType) GetPixelGreen(r)+pixel.green)/2; \
distance=(MagickRealType) GetPixelGreen(r)-(MagickRealType) pixel.green; \
distance_squared+=4.0*distance*distance; \
mean=((MagickRealType) GetPixelBlue(r)+pixel.blue)/2; \
distance=(MagickRealType) GetPixelBlue(r)-(MagickRealType) pixel.blue; \
distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \
mean)*distance*distance; \
mean=((MagickRealType) r->opacity+pixel.opacity)/2; \
distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \
distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \
mean)*distance*distance; \
if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \
QuantumRange/25.0f)) \
{ \
aggregate.red+=(weight)*GetPixelRed(r); \
aggregate.green+=(weight)*GetPixelGreen(r); \
aggregate.blue+=(weight)*GetPixelBlue(r); \
aggregate.opacity+=(weight)*GetPixelOpacity(r); \
total_weight+=(weight); \
} \
r++;
#define EnhanceImageTag "Enhance/Image"
CacheView
*enhance_view,
*image_view;
Image
*enhance_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ssize_t
y;
/*
Initialize enhanced image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* The 5x5 neighborhood kernel needs at least a 5x5 image. */
if ((image->columns < 5) || (image->rows < 5))
return((Image *) NULL);
enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (enhance_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
{
InheritException(exception,&enhance_image->exception);
enhance_image=DestroyImage(enhance_image);
return((Image *) NULL);
}
/*
Enhance image.
*/
status=MagickTrue;
progress=0;
(void) ResetMagickMemory(&zero,0,sizeof(zero));
image_view=AcquireAuthenticCacheView(image,exception);
enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,enhance_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
/*
Read another scan line.
*/
if (status == MagickFalse)
continue;
/* Fetch 5 rows starting at (-2,y-2): a 5x5 window around each pixel. */
p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickPixelPacket
aggregate;
MagickRealType
distance,
distance_squared,
mean,
total_weight;
PixelPacket
pixel;
register const PixelPacket
*restrict r;
/*
Compute weighted average of target pixel color components.
*/
aggregate=zero;
total_weight=0.0;
/* Center of the 5x5 window: row +2, column +2. */
r=p+2*(image->columns+4)+2;
pixel=(*r);
r=p;
Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
r=p+(image->columns+4);
Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
r=p+2*(image->columns+4);
Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0);
r=p+3*(image->columns+4);
Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
r=p+4*(image->columns+4);
Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
/* Normalize with (total_weight/2)-1 added for rounding. */
SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight);
SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/
total_weight);
SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight);
SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/
total_weight);
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EnhanceImage)
#endif
proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
enhance_view=DestroyCacheView(enhance_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
enhance_image=DestroyImage(enhance_image);
return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image)
% MagickBooleanType EqualizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: histogram-equalize the default channel set.
  */
  status=EqualizeImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
const ChannelType channel)
{
#define EqualizeImageTag "Equalize/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
black,
*histogram,
intensity,
*map,
white;
QuantumPixelPacket
*equalize_map;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Call OpenCL version */
status = AccelerateEqualizeImage(image, channel, &image->exception);
if (status != MagickFalse)
return status;
/*
Allocate and initialize histogram arrays.
*/
equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*equalize_map));
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*histogram));
map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
if ((equalize_map == (QuantumPixelPacket *) NULL) ||
(histogram == (MagickPixelPacket *) NULL) ||
(map == (MagickPixelPacket *) NULL))
{
if (map != (MagickPixelPacket *) NULL)
map=(MagickPixelPacket *) RelinquishMagickMemory(map);
if (histogram != (MagickPixelPacket *) NULL)
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (equalize_map != (QuantumPixelPacket *) NULL)
equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(
equalize_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Form histogram.
*/
(void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
exception=(&image->exception);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
if ((channel & SyncChannels) != 0)
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType intensity=GetPixelIntensity(image,p);
histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++;
p++;
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
if ((channel & GreenChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
if ((channel & BlueChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
if ((channel & OpacityChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
p++;
}
}
image_view=DestroyCacheView(image_view);
/*
Integrate the histogram to get the equalization map.
*/
(void) ResetMagickMemory(&intensity,0,sizeof(intensity));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & SyncChannels) != 0)
{
intensity.red+=histogram[i].red;
map[i]=intensity;
continue;
}
if ((channel & RedChannel) != 0)
intensity.red+=histogram[i].red;
if ((channel & GreenChannel) != 0)
intensity.green+=histogram[i].green;
if ((channel & BlueChannel) != 0)
intensity.blue+=histogram[i].blue;
if ((channel & OpacityChannel) != 0)
intensity.opacity+=histogram[i].opacity;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
intensity.index+=histogram[i].index;
map[i]=intensity;
}
black=map[0];
white=map[(int) MaxMap];
(void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & SyncChannels) != 0)
{
if (white.red != black.red)
equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].red-black.red))/(white.red-black.red)));
continue;
}
if (((channel & RedChannel) != 0) && (white.red != black.red))
equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].red-black.red))/(white.red-black.red)));
if (((channel & GreenChannel) != 0) && (white.green != black.green))
equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].green-black.green))/(white.green-black.green)));
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].blue-black.blue))/(white.blue-black.blue)));
if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].opacity-black.opacity))/(white.opacity-black.opacity)));
if ((((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)) &&
(white.index != black.index))
equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].index-black.index))/(white.index-black.index)));
}
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
map=(MagickPixelPacket *) RelinquishMagickMemory(map);
if (image->storage_class == PseudoClass)
{
/*
Equalize colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & SyncChannels) != 0)
{
if (white.red != black.red)
{
image->colormap[i].red=equalize_map[
ScaleQuantumToMap(image->colormap[i].red)].red;
image->colormap[i].green=equalize_map[
ScaleQuantumToMap(image->colormap[i].green)].red;
image->colormap[i].blue=equalize_map[
ScaleQuantumToMap(image->colormap[i].blue)].red;
image->colormap[i].opacity=equalize_map[
ScaleQuantumToMap(image->colormap[i].opacity)].red;
}
continue;
}
if (((channel & RedChannel) != 0) && (white.red != black.red))
image->colormap[i].red=equalize_map[
ScaleQuantumToMap(image->colormap[i].red)].red;
if (((channel & GreenChannel) != 0) && (white.green != black.green))
image->colormap[i].green=equalize_map[
ScaleQuantumToMap(image->colormap[i].green)].green;
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
image->colormap[i].blue=equalize_map[
ScaleQuantumToMap(image->colormap[i].blue)].blue;
if (((channel & OpacityChannel) != 0) &&
(white.opacity != black.opacity))
image->colormap[i].opacity=equalize_map[
ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
}
}
/*
Equalize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & SyncChannels) != 0)
{
if (white.red != black.red)
{
SetPixelRed(q,equalize_map[
ScaleQuantumToMap(GetPixelRed(q))].red);
SetPixelGreen(q,equalize_map[
ScaleQuantumToMap(GetPixelGreen(q))].red);
SetPixelBlue(q,equalize_map[
ScaleQuantumToMap(GetPixelBlue(q))].red);
SetPixelOpacity(q,equalize_map[
ScaleQuantumToMap(GetPixelOpacity(q))].red);
if (image->colorspace == CMYKColorspace)
SetPixelIndex(indexes+x,equalize_map[
ScaleQuantumToMap(GetPixelIndex(indexes+x))].red);
}
q++;
continue;
}
if (((channel & RedChannel) != 0) && (white.red != black.red))
SetPixelRed(q,equalize_map[
ScaleQuantumToMap(GetPixelRed(q))].red);
if (((channel & GreenChannel) != 0) && (white.green != black.green))
SetPixelGreen(q,equalize_map[
ScaleQuantumToMap(GetPixelGreen(q))].green);
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
SetPixelBlue(q,equalize_map[
ScaleQuantumToMap(GetPixelBlue(q))].blue);
if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
SetPixelOpacity(q,equalize_map[
ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
if ((((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)) &&
(white.index != black.index))
SetPixelIndex(indexes+x,equalize_map[
ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EqualizeImageChannel)
#endif
proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const char *level)
% MagickBooleanType GammaImageChannel(Image *image,
% const ChannelType channel,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
% o gamma: the image gamma.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
/*
  GammaImage() parses the gamma level string ("rho[,sigma[,xi]]") and applies
  gamma correction to the red, green, and blue channels.  Omitted sigma/xi
  components default to the red (rho) gamma.  Returns MagickFalse when the
  level string is NULL; MagickTrue when the gamma is the identity (1,1,1).
*/
MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  gamma.red=geometry_info.rho;
  gamma.green=((flags & SigmaValue) != 0) ? geometry_info.sigma : gamma.red;
  gamma.blue=((flags & XiValue) != 0) ? geometry_info.xi : gamma.red;
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);  /* identity gamma: nothing to do */
  if ((gamma.red == gamma.green) && (gamma.green == gamma.blue))
    {
      /*
        Uniform gamma: correct all three channels in a single pass.
      */
      status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel |
        BlueChannel),(double) gamma.red);
      return(status != 0 ? MagickTrue : MagickFalse);
    }
  status=GammaImageChannel(image,RedChannel,(double) gamma.red);
  status&=GammaImageChannel(image,GreenChannel,(double) gamma.green);
  status&=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
  GammaImageChannel() gamma-corrects the selected channels of the image in
  place.  A gamma of 1.0 is a no-op; a gamma of 0.0 zeroes the selected
  channels (the lookup map is left all zeros).  Returns MagickTrue on
  success, MagickFalse if any row fails to sync or the progress monitor
  aborts.
*/
MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaCorrectImageTag "GammaCorrect/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;  /* quantum lookup table: map index -> gamma-corrected value */

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);  /* identity: nothing to do */
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    gamma == 0.0 leaves the map all zeros, which zeroes the channel below.
  */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        /*
          Integer quantum build: correct via the precomputed lookup table.
        */
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[ScaleQuantumToMap(
            image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[ScaleQuantumToMap(
            image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[ScaleQuantumToMap(
            image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            /*
              With a matte channel, opacity is corrected in inverted (alpha)
              space so the gamma applies to transparency, not opacity.
            */
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[ScaleQuantumToMap(
                image->colormap[i].opacity)];
            else
              image->colormap[i].opacity=QuantumRange-gamma_map[
                ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
#else
        /*
          HDRI build: compute pow() directly (no lookup table) so
          out-of-range values are preserved.
        */
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].red,1.0/gamma);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].green,1.0/gamma);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].blue,1.0/gamma);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale*
                image->colormap[i].opacity,1.0/gamma);
            else
              image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow(
                QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/
                gamma);
          }
#endif
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((channel & SyncChannels) != 0)
        {
          /* all color channels share the same correction */
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
#else
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
            1.0/gamma));
          SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q),
            1.0/gamma));
          SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
            1.0/gamma));
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
              1.0/gamma));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*
              GetPixelGreen(q),1.0/gamma));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
              1.0/gamma));
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelOpacity(q),1.0/gamma));
              else
                SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelAlpha(q),1.0/gamma));
            }
        }
#endif
      q++;
    }
    /*
      CMYK black channel is stored in the index queue; correct it separately.
    */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GammaImageChannel)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /*
    Track the cumulative gamma applied to the image in its metadata.
  */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the colors in the reference image to gray.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
*/
/*
  Return the larger of two real values.
*/
static inline MagickRealType MagickMax(const MagickRealType x,
  const MagickRealType y)
{
  return(x > y ? x : y);
}
/*
  Return the smaller of two real values.
*/
static inline MagickRealType MagickMin(const MagickRealType x,
  const MagickRealType y)
{
  return(x < y ? x : y);
}
/*
  GrayscaleImage() converts the image colors to gray using the given pixel
  intensity method, then sets the image type and colorspace to grayscale.
  PseudoClass images are first promoted to DirectClass.  An OpenCL
  accelerated path is tried first; the CPU loop below is the fallback.
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method)
{
#define GrayscaleImageTag "Grayscale/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Materialize colormap values into pixels before going DirectClass.
      */
      if (SyncImage(image) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Grayscale image.
  */
  /* call opencl version */
  status = AccelerateGrayscaleImage(image, method, &image->exception);
  if (status == MagickTrue)
    return status;
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        intensity,
        red;

      red=(MagickRealType) q->red;
      green=(MagickRealType) q->green;
      blue=(MagickRealType) q->blue;
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: the maximum component */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of min and max components */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* mean of squared components, normalized back to quantum range */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/(3.0*QuantumRange));
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /*
            Luma is defined on gamma-encoded components; linear RGB is
            encoded first.
          */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /*
            Luminance is defined on linear components; sRGB is decoded first.
          */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:  /* Rec.709 luma is the default intensity method */
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* root-mean-square of the components */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(q,ClampToQuantum(intensity));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GrayscaleImageChannel)
#endif
        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  return(SetImageColorspace(image,GRAYColorspace));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image)
% MagickBooleanType HaldClutImageChannel(Image *image,
% const ChannelType channel,Image *hald_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
/*
  HaldClutImage() applies the Hald color lookup table to the default
  channels.  Convenience wrapper around HaldClutImageChannel().
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image)
{
  MagickBooleanType
    status;

  status=HaldClutImageChannel(image,DefaultChannels,hald_image);
  return(status);
}
/*
  HaldClutImageChannel() replaces the selected channels of each pixel with
  values looked up (with trilinear interpolation) in a Hald CLUT image — a
  3-D color cube flattened to 2-D.  Returns MagickTrue on success.

  Fix: the index queue used for the CMYK black channel must come from the
  destination image's view (image_view); it was previously fetched from
  hald_view, so SetPixelIndex() wrote through the CLUT image's index queue
  instead of the row of the image being modified.
*/
MagickExport MagickBooleanType HaldClutImageChannel(Image *image,
  const ChannelType channel,const Image *hald_image)
{
#define HaldClutImageTag "Clut/Image"

  typedef struct _HaldInfo
  {
    MagickRealType
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Recover the cube order from the CLUT dimensions: a Hald image of order n
    is (n*n*n) x (n*n*n) pixels; 'level' below becomes n*n.
  */
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetMagickPixelPacket(hald_image,&zero);
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  hald_view=AcquireAuthenticCacheView(hald_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,hald_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      offset;

    HaldInfo
      point;

    MagickPixelPacket
      pixel,
      pixel1,
      pixel2,
      pixel3,
      pixel4;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      The CMYK black channel of the *destination* row is written below, so
      take the index queue from image_view (was hald_view).
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    pixel1=zero;
    pixel2=zero;
    pixel3=zero;
    pixel4=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Map the pixel's RGB to fractional coordinates in the color cube;
        'offset' is the flattened position of the cube cell's origin.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(q);
      offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z));
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Trilinear interpolation: blend along y at the lower z slice...
      */
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel3);
      /*
        ...then at the upper z slice, then blend the two along z.
      */
      offset+=cube_size;
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel4);
      MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4,
        pixel4.opacity,point.z,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(pixel.index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HaldClutImageChannel)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() and LevelizeImageChannel(), below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o levels: Specify the levels where the black and white points have the
% range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
% A '!' flag inverts the re-mapping.
%
*/
/*
  LevelImage() parses a level string ("black[,white[,gamma]]", optionally
  with '%' and a trailing '!' to invert the mapping) and applies the level
  operation to the default channels.  Returns MagickFalse when the level
  string is NULL.

  Fix: with the '%' flag, black/white points are percentages of
  QuantumRange (intensities, per the documented "10x90%+2" example), not of
  the pixel count; scaling by columns*rows belongs to histogram-based
  operators such as ContrastStretchImage.
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;  /* symmetric default */
  if ((flags & AspectValue ) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);  /* '!' flag */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() applies the normal level operation to the image, spreading
% out the values between the black and white points over the entire range of
% values. Gamma correction is also applied after the values has been mapped.
%
% It is typically used to improve image contrast, or to provide a controlled
% linear threshold for the image. If the black and white points are set to
% the minimum and maximum values found in the image, the image can be
% normalized, or by swapping black and white values, negate the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma)
% MagickBooleanType LevelImageChannel(Image *image,
% const ChannelType channel,const double black_point,
% const double white_point,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level which is to be mapped to zero (black)
%
% o white_point: The level which is to be mapped to QuantumRange (white)
%
% o gamma: adjust gamma by this factor before mapping values.
% use 1.0 for purely linear stretching of image color values
%
*/
/*
  Map a pixel value from the [black_point,white_point] interval onto the
  full quantum range, applying gamma correction to the normalized value.
  When the two points coincide, the scale degenerates to 1.0.
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    scale;

  scale=1.0;
  if (white_point != black_point)
    scale=1.0/(white_point-black_point);
  return(QuantumRange*gamma_pow(scale*((double) pixel-black_point),1.0/gamma));
}
/*
  LevelImageChannel() stretches the selected channels so that values between
  black_point and white_point span the full quantum range, applying gamma
  afterwards (see LevelPixel()).  PseudoClass colormaps are leveled as well.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].blue));
      /*
        Opacity is leveled in inverted (alpha) space, then inverted back.
      */
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      /*
        The matte channel is leveled directly in alpha space here.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelAlpha(q))));
      /*
        CMYK black channel lives in the index queue.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelImageChannel)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImageChannel() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImageChannel() can be called with by using a +level command line
% API option, or using a '!' on a -level or LevelImage() geometry string.
%
% It can be used for example de-contrast a greyscale image to the exact
% levels specified. Or by using specific levels for each channel of an image
% you can convert a gray-scale image to any linear color gradient, according
% to those levels.
%
% The format of the LevelizeImageChannel method is:
%
% MagickBooleanType LevelizeImageChannel(Image *image,
% const ChannelType channel,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
*/
/*
  LevelizeImage() applies the reverse level operation to the default
  channels.  Convenience wrapper around LevelizeImageChannel().
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  return(LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma));
}
/*
  LevelizeImageChannel() applies the reverse of LevelImageChannel(): it
  compresses the full quantum range of the selected channels into
  [black_point,white_point], applying gamma before the mapping.  PseudoClass
  colormaps are levelized as well.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag "Levelize/Image"
/*
  Gamma-adjust the normalized value, then map [0,1] onto
  [black_point,white_point].
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      /*
        Opacity is levelized in inverted (alpha) space, then inverted back.
      */
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue(
          QuantumRange-image->colormap[i].opacity));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      /*
        CMYK black channel lives in the index queue.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImageChannel)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelColorsImage() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelColorsImageChannel method is:
%
% MagickBooleanType LevelColorsImage(Image *image,
% const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
% MagickBooleanType LevelColorsImageChannel(Image *image,
% const ChannelType channel,const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
*/
MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  /*
    Convenience wrapper: level the given colors across the default channels.
  */
  return(LevelColorsImageChannel(image,DefaultChannels,black_color,
    white_color,invert));
}
MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickBooleanType
    (*level)(Image *,const ChannelType,const double,const double,
      const double);

  MagickStatusType
    status;

  /*
    Level (or, when 'invert' is set, levelize) each requested channel
    between the corresponding components of the given black and white
    colors, with a gamma of 1.0.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A gray image leveled against non-gray colors cannot remain gray.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Both worker functions share a signature; pick one and run a single
    channel chain instead of duplicating it per direction.
  */
  level=(invert == MagickFalse) ? LevelImageChannel : LevelizeImageChannel;
  status=MagickTrue;
  if ((channel & RedChannel) != 0)
    status&=level(image,RedChannel,black_color->red,white_color->red,1.0);
  if ((channel & GreenChannel) != 0)
    status&=level(image,GreenChannel,black_color->green,white_color->green,
      1.0);
  if ((channel & BlueChannel) != 0)
    status&=level(image,BlueChannel,black_color->blue,white_color->blue,1.0);
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    status&=level(image,OpacityChannel,black_color->opacity,
      white_color->opacity,1.0);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    status&=level(image,IndexChannel,black_color->index,white_color->index,
      1.0);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    *histogram,
    sum;

  ssize_t
    black,
    white,
    y;

  /*
    Build an intensity histogram of the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  if (histogram == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,
        p)))]++;
      p++;
    }
  }
  /*
    Scan inward from each end of the histogram until the requested pixel
    counts are covered; these become the black and white levels.
  */
  sum=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    sum+=histogram[black];
    if (sum >= black_point)
      break;
  }
  sum=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    sum+=histogram[white];
    if (sum >= white_point)
      break;
  }
  histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
  /*
    Level the remaining pixels between the located boundaries.
  */
  status=LevelImageChannel(image,DefaultChannels,(double) black,
    (double) white,1.0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and
% hue.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Increase or decrease color luma, chroma, or hue in HCL space; each
    percent argument is a percentage where 100 leaves the component
    unchanged.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  /*
    Wrap with '>=' (was '>') so hue == 1.0 normalizes to 0.0 -- the same
    angle -- matching ModulateHSL() and the other Modulate helpers.
  */
  while (hue >= 1.0)
    hue-=1.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Increase or decrease color luma, chroma, or hue in HCLp space; 100%
    leaves a component unchanged.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  /*
    Wrap with '>=' (was '>') so hue == 1.0 normalizes to 0.0 -- the same
    angle -- matching ModulateHSL() and the other Modulate helpers.
  */
  while (hue >= 1.0)
    hue-=1.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Increase or decrease color brightness, saturation, or hue in HSB
    space; 100% leaves a component unchanged.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  /*
    Wrap with '>=' (was '>') so hue == 1.0 normalizes to 0.0 -- the same
    angle -- matching ModulateHSL() and the other Modulate helpers.
  */
  while (hue >= 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  brightness*=0.01*percent_brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    hue,
    intensity,
    saturation;

  /*
    Increase or decrease color intensity, saturation, or hue in HSI
    space; 100% leaves a component unchanged.
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  /*
    Wrap with '>=' (was '>') so hue == 1.0 normalizes to 0.0 -- the same
    angle -- matching ModulateHSL() and the other Modulate helpers.
  */
  while (hue >= 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  intensity*=0.01*percent_intensity;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    h,
    l,
    s;

  /*
    Scale saturation and lightness, and rotate hue, in HSL space; a
    percent of 100 leaves that component unchanged.
  */
  ConvertRGBToHSL(*red,*green,*blue,&h,&s,&l);
  s*=0.01*percent_saturation;
  l*=0.01*percent_lightness;
  h+=0.5*(0.01*percent_hue-1.0);
  while (h < 0.0)
    h+=1.0;
  while (h >= 1.0)
    h-=1.0;
  ConvertHSLToRGB(h,s,l,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    h,
    s,
    v;

  /*
    Scale saturation and value, and rotate hue, in HSV space; a percent
    of 100 leaves that component unchanged.
  */
  ConvertRGBToHSV(*red,*green,*blue,&h,&s,&v);
  s*=0.01*percent_saturation;
  v*=0.01*percent_value;
  h+=0.5*(0.01*percent_hue-1.0);
  while (h < 0.0)
    h+=1.0;
  while (h >= 1.0)
    h-=1.0;
  ConvertHSVToRGB(h,s,v,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    b,
    h,
    w;

  /*
    Scale whiteness and blackness, and rotate hue, in HWB space; a
    percent of 100 leaves that component unchanged.
  */
  ConvertRGBToHWB(*red,*green,*blue,&h,&w,&b);
  w*=0.01*percent_whiteness;
  b*=0.01*percent_blackness;
  h+=0.5*(0.01*percent_hue-1.0);
  while (h < 0.0)
    h+=1.0;
  while (h >= 1.0)
    h-=1.0;
  ConvertHWBToRGB(h,w,b,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    c,
    h,
    l;

  /*
    Scale luma and chroma, and rotate hue, in LCHab space; a percent of
    100 leaves that component unchanged.
  */
  ConvertRGBToLCHab(*red,*green,*blue,&l,&c,&h);
  l*=0.01*percent_luma;
  c*=0.01*percent_chroma;
  h+=0.5*(0.01*percent_hue-1.0);
  while (h < 0.0)
    h+=1.0;
  while (h >= 1.0)
    h-=1.0;
  ConvertLCHabToRGB(l,c,h,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    c,
    h,
    l;

  /*
    Scale luma and chroma, and rotate hue, in LCHuv space; a percent of
    100 leaves that component unchanged.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,&l,&c,&h);
  l*=0.01*percent_luma;
  c*=0.01*percent_chroma;
  h+=0.5*(0.01*percent_hue-1.0);
  while (h < 0.0)
    h+=1.0;
  while (h >= 1.0)
    h-=1.0;
  ConvertLCHuvToRGB(l,c,h,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate)
{
#define ModulateImageTag  "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Geometry is "brightness[,saturation[,hue]]" in percent; omitted terms
    default to 100% (no change).
  */
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  /*
    The "modulate:colorspace" artifact selects the working colorspace; the
    default is HSL.
  */
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      Quantum
        blue,
        green,
        red;

      /*
        Modulate image colormap.
      */
      red=image->colormap[i].red;
      green=image->colormap[i].green;
      blue=image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        /*
          LCH is treated as an alias for LCHuv, matching the pixel loop
          below (previously this switch paired LCH with LCHab while the
          pixel loop paired it with LCHuv).
        */
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
  /* call opencl version */
  status=AccelerateModulateImage(image,percent_brightness,percent_hue,
    percent_saturation,colorspace,&image->exception);
  if (status != MagickFalse)
    return(status);
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        blue,
        green,
        red;

      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        /*
          HSI was missing here although the colormap switch above handles
          it; without this case HSI-modulated DirectClass images silently
          fell back to HSL.
        */
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ModulateImage)
#endif
        proceed=SetImageProgress(image,ModulateImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImageChannel method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale)
% MagickBooleanType NegateImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType grayscale)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale)
{
  /*
    Convenience wrapper: negate colors across the default channels.
  */
  return(NegateImageChannel(image,DefaultChannels,grayscale));
}
MagickExport MagickBooleanType NegateImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType grayscale)
{
#define NegateImageTag  "Negate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Negate colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        /*
          In grayscale mode, only negate entries whose RGB components are
          all equal.
        */
        if (grayscale != MagickFalse)
          if ((image->colormap[i].red != image->colormap[i].green) ||
              (image->colormap[i].green != image->colormap[i].blue))
            continue;
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Negate image.
  */
  /* call opencl version */
  status=AccelerateNegateImageChannel(image,channel,grayscale,
    &image->exception);
  if (status != MagickFalse)
    return(status);
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  if (grayscale != MagickFalse)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(progress,status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /*
            Skip pixels that are not gray (unequal RGB components).
          */
          if ((GetPixelRed(q) != GetPixelGreen(q)) ||
              (GetPixelGreen(q) != GetPixelBlue(q)))
            {
              q++;
              continue;
            }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange-GetPixelRed(q));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_NegateImageChannel)
#endif
            proceed=SetImageProgress(image,NegateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        Return the accumulated status; this previously returned MagickTrue
        unconditionally, masking row failures recorded in 'status'.
      */
      return(status);
    }
  /*
    Negate image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (channel == DefaultChannels)
      /*
        Fast path: negate all color channels without per-channel tests.
      */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
        SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x));
        SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x));
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x));
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x));
        if ((channel & OpacityChannel) != 0)
          SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x));
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x));
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_NegateImageChannel)
#endif
        proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image)
% MagickBooleanType NormalizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  /*
    Convenience wrapper: normalize contrast across the default channels.
  */
  return(NormalizeImageChannel(image,DefaultChannels));
}
MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    black_point,
    white_point;

  /*
    Saturate the darkest 0.15% of pixels to black and the brightest 0.05%
    to white, then stretch the remaining intensities.
  */
  black_point=(double) image->columns*image->rows*0.0015;
  white_point=(double) image->columns*image->rows*0.9995;
  return(ContrastStretchImageChannel(image,channel,black_point,white_point));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels)
% MagickBooleanType SigmoidalContrastImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType sharpen,
% const double contrast,const double midpoint)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
*/
/*
ImageMagick 7 has a version of this function which does not use LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
/*
  Sigmoidal(a,b,x): a is the "slope" (contrast strength), b the inflexion
  point (midpoint), x the input; all of b and x in [0,1].
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
  Scaled sigmoidal function:
  ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
  ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
  See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
  http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
  of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
  zero. This is fixed below by exiting immediately when contrast is small,
  leaving the image (or colormap) unmodified. This appears to be safe because
  the series expansion of the logistic sigmoidal function around x=b is
  1/2-a*(b-x)/4+...
  so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
/*
  ScaledSigmoidal normalizes Sigmoidal so its range over x in [0,1] is
  exactly [0,1].
*/
#define ScaledSigmoidal(a,b,x) ( \
  (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
  (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/*
  Right inverse of ScaledSigmoidal: maps a value x back through the scaled
  sigmoidal with slope a and midpoint b. The argument is clamped to the
  open domain of atanh (resp. the logistic inverse) before inversion, so
  out-of-gamut (e.g. HDRI) inputs do not produce NaN/Inf. Note the #if
  deliberately splits the 'clamped' initializer and the return statement
  between the two builds.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  /* Undo the [0,1] scaling applied by ScaledSigmoidal. */
  const double argument=(sig1-sig0)*x+sig0;
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      /* atanh domain is (-1,1); clamp by MagickEpsilon at both ends. */
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b+(2.0/a)*atanh(clamped));
#else
      /* Logistic-inverse domain is (0,1); clamp likewise. */
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const char *levels)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  /*
    Parse "contrast[,midpoint[%]]" and forward to the channel method.
    A missing midpoint defaults to mid-gray; a percent midpoint is
    scaled into the quantum range.
  */
  flags=ParseGeometry(levels,&geometry_info);
  if ((flags & SigmaValue) == 0)
    geometry_info.sigma=1.0*QuantumRange/2.0;
  if ((flags & PercentValue) != 0)
    geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0;
  return(SigmoidalContrastImageChannel(image,DefaultChannels,sharpen,
    geometry_info.rho,geometry_info.sigma));
}
/*
  Apply sigmoidal contrast (sharpen != MagickFalse) or its inverse to the
  selected channels via a precomputed MaxMap-entry lookup table.
*/
MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType sharpen,
  const double contrast,const double midpoint)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *sigmoidal_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Side effect: clamps values unless contrast<MagickEpsilon, in which
    case nothing is done.
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Allocate and initialize sigmoidal maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*sigmoidal_map));
  if (sigmoidal_map == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map));
  /*
    Fill the LUT with the (inverse) scaled sigmoidal of each map index;
    midpoint is given in quantum units, hence the QuantumScale factor.
  */
  if (sharpen != MagickFalse)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/
        MaxMap)));
  else
    for (i=0; i <= (ssize_t) MaxMap; i++)
      sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (
        MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/
        MaxMap)));
  /*
    Sigmoidal-contrast enhance colormap.
  */
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].red)]);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].green)]);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].blue)]);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].opacity)]);
    }
  /*
    Sigmoidal-contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* A prior row failure aborts remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Push each selected channel through the LUT. */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelRed(q))]));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelGreen(q))]));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelBlue(q))]));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelOpacity(q))]));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SigmoidalContrastImageChannel)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map);
  return(status);
}
|
LG_CC_FastSV5_64.c | //------------------------------------------------------------------------------
// LG_CC_FastSV5_64: connected components (64-bit version)
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//------------------------------------------------------------------------------
// Code is based on the algorithm described in the following paper
// Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component
// Algorithm with Fast Convergence (SIAM PP20)
// A subsequent update to the algorithm is here (which might not be reflected
// in this code):
//
// Yongzhe Zhang, Ariful Azad, Aydin Buluc: Parallel algorithms for finding
// connected components using linear algebra. J. Parallel Distributed Comput.
// 144: 14-27 (2020).
// Modified by Tim Davis, Texas A&M University
// The input graph G must be undirected, or directed but with an adjacency
// matrix with symmetric structure. Self-edges (diagonal entries) are OK, and
// are ignored. The values and type of G->A are ignored; just its structure is
// accessed.
// This function cannot be called by multiple user threads, since it unpacks
// G->A and then packs it back. G->A is unchanged when the function returns,
// but during execution G->A is empty.
#define LAGraph_FREE_ALL ;
#include "LG_internal.h"
#if !LG_VANILLA
#if (! LG_SUITESPARSE )
#error "SuiteSparse:GraphBLAS v6.0.0 or later required"
#endif
//------------------------------------------------------------------------------
// hash functions: todo describe me
//------------------------------------------------------------------------------
// hash table size must be a power of 2
#define HASH_SIZE 1024
// number of samples to insert into the hash table
// todo: this seems to be a lot of entries for a HASH_SIZE of 1024.
// There could be lots of collisions.
#define HASH_SAMPLES 864
#define HASH(x) (((x << 4) + x) & (HASH_SIZE-1))
#define NEXT(x) ((x + 23) & (HASH_SIZE-1))
//------------------------------------------------------------------------------
// ht_init: todo describe me
//------------------------------------------------------------------------------
// Clear the hash table counts (ht_val [0:HASH_SIZE-1] = 0), and set all hash
// table entries as empty (ht_key [0:HASH_SIZE-1] =-1).
// todo: the memset of ht_key is confusing
// todo: the name "ht_val" is confusing. It is not a value, but a count of
// the number of times the value x = ht_key [h] has been inserted into the
// hth position in the hash table. It should be renamed ht_cnt.
static inline void ht_init
(
    int64_t *ht_key,
    int64_t *ht_val
)
{
    // Reset every slot: key -1 means "slot unoccupied", count 0 means the
    // slot's key (once set) has not been tallied yet.
    for (int64_t h = 0 ; h < HASH_SIZE ; h++)
    {
        ht_key [h] = -1 ;
        ht_val [h] = 0 ;
    }
}
//------------------------------------------------------------------------------
// ht_sample: todo describe me
//------------------------------------------------------------------------------
//
static inline void ht_sample
(
    uint64_t *V,        // array of size n holding the values to sample from
    int64_t n,
    int64_t samples,    // number of random draws to tally
    int64_t *ht_key,
    int64_t *ht_val,
    uint64_t *seed
)
{
    for (int64_t k = 0 ; k < samples ; k++)
    {
        // draw one entry of V uniformly at random
        int64_t x = V [LAGraph_Random60 (seed) % n] ;
        // linear-probe until x's slot, or the first empty slot, is found
        int64_t h ;
        for (h = HASH (x) ; ht_key [h] != -1 && ht_key [h] != x ; h = NEXT (h))
        {
            // keep probing
        }
        // claim the slot for x (a no-op if already claimed) and count the hit
        ht_key [h] = x ;
        ht_val [h]++ ;
    }
}
//------------------------------------------------------------------------------
// ht_most_frequent: todo describe me
//------------------------------------------------------------------------------
// todo what if key is returned as -1? Code breaks. todo: handle this case
static inline int64_t ht_most_frequent
(
    int64_t *ht_key,
    int64_t *ht_val
)
{
    int64_t best_key = -1 ;     // key with the highest count seen so far
    int64_t best_cnt = 0 ;      // that key's count
    for (int64_t h = 0 ; h < HASH_SIZE ; h++)
    {
        if (ht_val [h] > best_cnt)
        {
            best_key = ht_key [h] ;
            best_cnt = ht_val [h] ;
        }
    }
    // NOTE(review): best_key stays -1 when every count is zero; callers must
    // be prepared for that sentinel.
    return (best_key) ;
}
//------------------------------------------------------------------------------
// Reduce_assign: w (index) += s, using MIN as the "+=" accum operator
//------------------------------------------------------------------------------
// The index array, of size n can have duplicates. The vectors w and s are
// full (all entries present). This function computes:
//
// for (j = 0 ; j < n ; j++)
// {
// uint64_t i = index [j] ;
// w [i] = min (w [i], s [j]) ;
// }
//
// If C(i,j) = true where i == index [j], then this can be written with the
// min_second semiring:
//
// w = min (w, C*s)
static inline int Reduce_assign
(
    GrB_Vector w,           // vector of size n, all entries present
    GrB_Vector s,           // vector of size n, all entries present
    GrB_Matrix C,           // boolean matrix of size n-by-n (contents replaced
                            // by pack, then reclaimed by unpack)
    GrB_Index **Cp_handle,  // array of size n+1, equal to 0:n
    GrB_Index **Ci_handle,  // index array of size n, can have duplicates
    bool **Cx_handle,       // array of size 1, equal to true
    char *msg               // unused here; kept for the LAGraph error-macro
                            // calling convention
)
{
    // size of Cp, Ci, and Cx in bytes
    GrB_Index n ;
    GrB_TRY (GrB_Vector_size (&n, w)) ;
    GrB_Index Cp_size = (n+1) * sizeof (GrB_Index) ;
    GrB_Index Ci_size = n * sizeof (GrB_Index) ;
    GrB_Index Cx_size = sizeof (bool) ;
    // pack Cp, Ci, and Cx into a matrix C with C(i,j) = true if Ci(j) == i.
    // Packing moves ownership of the three arrays into C (the handles are
    // set to NULL by GraphBLAS); they are returned by the unpack below.
    bool iso = true ;
    bool jumbled = false ;
    GrB_TRY (GxB_Matrix_pack_CSC (C, Cp_handle, Ci_handle, (void **) Cx_handle,
        Cp_size, Ci_size, Cx_size, iso, jumbled, NULL)) ;
    // w = min (w, C*s) using the MIN_SECOND semiring: this implements
    // w [Ci [j]] = min (w [Ci [j]], s [j]) for all j, handling duplicates
    GrB_TRY (GrB_mxv (w, NULL, GrB_MIN_UINT64,
        GrB_MIN_SECOND_SEMIRING_UINT64, C, s, NULL)) ;
    // unpack the contents of C, restoring Cp/Ci/Cx to the caller's handles
    GrB_TRY (GxB_Matrix_unpack_CSC (C, Cp_handle, Ci_handle, (void **)Cx_handle,
        &Cp_size, &Ci_size, &Cx_size, &iso, &jumbled, NULL)) ;
    return (GrB_SUCCESS) ; // yay! It works!
}
//------------------------------------------------------------------------------
// LG_CC_FastSV5_64
//------------------------------------------------------------------------------
// The output of LG_CC_FastSV5 is a vector component, where
// component(i)=s if node i is in the connected component whose
// representative node is node s. If s is a representative, then
// component(s)=s. The number of connected components in the graph G is the
// number of representatives.
#undef LAGraph_FREE_ALL
#define LAGraph_FREE_ALL \
{ \
LAGraph_Free ((void **) &Cp) ; \
LAGraph_Free ((void **) &Cx) ; \
LAGraph_Free ((void **) &V) ; \
LAGraph_Free ((void **) &ht_key) ; \
LAGraph_Free ((void **) &ht_val) ; \
/* todo why is T not freed?? */ \
GrB_free (&t) ; \
GrB_free (&f) ; \
GrB_free (&gp) ; \
GrB_free (&mngp) ; \
GrB_free (&gp_new) ; \
GrB_free (&mod) ; \
}
#endif
int LG_CC_FastSV5_64  // SuiteSparse:GraphBLAS method, with GxB extensions
(
    // output
    GrB_Vector *component,  // component(i)=s if node is in the component s
    // inputs
    LAGraph_Graph G,        // input graph, G->A can change
    char *msg
)
{

#if LG_VANILLA
    LG_CHECK (0, -1, "SuiteSparse required for this method") ;
#else

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LG_CLEAR_MSG ;
    uint64_t *V = NULL ;
    int64_t *ht_key = NULL, *ht_val = NULL ;
    GrB_Index n, nnz ;
    GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL,
        t = NULL ;
    GrB_Matrix T = NULL, C = NULL ;
    GrB_Index *Cp = NULL ;
    GrB_Index Cp_size = 0 ;
    bool *Cx = NULL ;
    LG_CHECK (LAGraph_CheckGraph (G, msg), -1, "graph is invalid") ;
    LG_CHECK (component == NULL, -1, "component parameter is NULL") ;
    if (G->kind == LAGRAPH_ADJACENCY_UNDIRECTED ||
        (G->kind == LAGRAPH_ADJACENCY_DIRECTED &&
        G->A_structure_is_symmetric == LAGRAPH_TRUE))
    {
        // A must be symmetric
        ;
    }
    else
    {
        // A must not be unsymmetric
        LG_CHECK (false, -1, "input must be symmetric") ;
    }

    GrB_Matrix S = G->A ;
    GrB_TRY (GrB_Matrix_nrows (&n, S)) ;
    GrB_TRY (GrB_Matrix_nvals (&nnz, S)) ;

    // Sampling is only worthwhile when the graph has enough edges that a
    // FASTSV_SAMPLES-entries-per-row subgraph is a strict subset of S.
    #define FASTSV_SAMPLES 4
    bool sampling = (n * FASTSV_SAMPLES * 2 < nnz) ;

    // random number seed
    uint64_t seed = n ;

    //--------------------------------------------------------------------------
    // initializations
    //--------------------------------------------------------------------------

    // determine # of threads to use
    int nthreads ;
    LAGraph_TRY (LAGraph_GetNumThreads (&nthreads, NULL)) ;
    nthreads = LAGraph_MIN (nthreads, n / 16) ;
    nthreads = LAGraph_MAX (nthreads, 1) ;

    // vectors: f is the parent vector, gp the grandparent vector, mngp the
    // minimum neighbor grandparent, mod a boolean change mask
    GrB_TRY (GrB_Vector_new (&f, GrB_UINT64, n)) ;
    GrB_TRY (GrB_Vector_new (&gp_new, GrB_UINT64, n)) ;
    GrB_TRY (GrB_Vector_new (&mod, GrB_BOOL, n)) ;

    // V holds a dense copy of f; f = 0:n-1 via ROWINDEX applied to a dense 0
    // NOTE(review): V is not checked against NULL here -- TODO confirm
    // whether LAGraph_Malloc failure should be caught like ht_key below.
    V = LAGraph_Malloc (n, sizeof (uint64_t)) ;
    GrB_TRY (GrB_assign (f, NULL, NULL, 0, GrB_ALL, n, NULL)) ;
    GrB_TRY (GrB_apply (f, NULL, NULL, GrB_ROWINDEX_INT64, f, 0, NULL)) ;
    GrB_TRY (GrB_Vector_extractTuples (NULL, V, &n, f)) ;
    bool V_is_identity = true ; // true if V is 0:n-1
    GrB_TRY (GrB_Vector_dup (&gp, f)) ;
    GrB_TRY (GrB_Vector_dup (&mngp, f)) ;

    // allocate the hash table
    ht_key = LAGraph_Malloc (HASH_SIZE, sizeof (int64_t)) ;
    ht_val = LAGraph_Malloc (HASH_SIZE, sizeof (int64_t)) ;
    LG_CHECK (ht_key == NULL || ht_val == NULL, -1, "out of memory") ;

    // create Cp = 0:n, and Cx = true, and the empty C matrix; Cp/Cx are the
    // constant CSC pattern repeatedly packed into C by Reduce_assign
    GrB_TRY (GrB_Vector_new (&t, GrB_INT64, n+1)) ;
    GrB_TRY (GrB_assign (t, NULL, NULL, 0, GrB_ALL, n+1, NULL)) ;
    GrB_TRY (GrB_apply (t, NULL, NULL, GrB_ROWINDEX_INT64, t, 0, NULL)) ;
    GrB_TRY (GxB_Vector_unpack_Full (t, (void **) &Cp, &Cp_size, NULL, NULL)) ;
    // NOTE(review): Cx allocation is not NULL-checked -- TODO confirm intent
    Cx = (bool *) LAGraph_Malloc (1, sizeof (bool)) ;
    Cx [0] = true ;
    GrB_TRY (GrB_free (&t)) ;
    GrB_TRY (GrB_Matrix_new (&C, GrB_BOOL, n, n)) ;

    //--------------------------------------------------------------------------
    // sample phase
    //--------------------------------------------------------------------------

    if (sampling)
    {

        //----------------------------------------------------------------------
        // export S = G->A in CSR format
        //----------------------------------------------------------------------

        // S is not modified. It is only exported so that its contents can be
        // read by the parallel loops below.
        GrB_Type type ;
        GrB_Index nrows, ncols, nvals ;
        size_t typesize ;
        int64_t nonempty ;      // note: declared but not used below
        GrB_Index *Sp, *Sj ;
        void *Sx ;
        bool S_jumbled = false ;
        GrB_Index Sp_size, Sj_size, Sx_size ;
        bool S_iso = false ;
        GrB_TRY (GrB_Matrix_nvals (&nvals, S)) ;
        GrB_TRY (GxB_Matrix_export_CSR (&S, &type, &nrows, &ncols, &Sp, &Sj,
            &Sx, &Sp_size, &Sj_size, &Sx_size,
            &S_iso, &S_jumbled, NULL)) ;
        GrB_TRY (GxB_Type_size (&typesize, type)) ;
        // G->A is empty until S is re-imported at the end of this phase
        G->A = NULL ;

        //----------------------------------------------------------------------
        // allocate space to construct T
        //----------------------------------------------------------------------

        GrB_Index Tp_len = nrows+1, Tp_size = Tp_len*sizeof(GrB_Index);
        GrB_Index Tj_len = nvals, Tj_size = Tj_len*sizeof(GrB_Index);
        GrB_Index Tx_len = nvals ;
        GrB_Index *Tp = LAGraph_Malloc (Tp_len, sizeof (GrB_Index)) ;
        GrB_Index *Tj = LAGraph_Malloc (Tj_len, sizeof (GrB_Index)) ;
        GrB_Index Tx_size = typesize ;
        void *Tx = LAGraph_Calloc (1, typesize) ; // T is iso
        // todo check out-of-memory conditions

        //----------------------------------------------------------------------
        // allocate workspace
        //----------------------------------------------------------------------

        int64_t *range = LAGraph_Malloc (nthreads + 1, sizeof (int64_t)) ;
        GrB_Index *count = LAGraph_Malloc (nthreads + 1, sizeof (GrB_Index)) ;
        // todo check out-of-memory conditions
        memset (count, 0, sizeof (GrB_Index) * (nthreads + 1)) ;

        //----------------------------------------------------------------------
        // define parallel tasks to construct T
        //----------------------------------------------------------------------

        // thread tid works on rows range[tid]:range[tid+1]-1 of S and T
        for (int tid = 0 ; tid <= nthreads ; tid++)
        {
            range [tid] = (n * tid + nthreads - 1) / nthreads ;
        }

        //----------------------------------------------------------------------
        // determine the number entries to be constructed in T for each thread
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                int64_t deg = Sp [i + 1] - Sp [i] ;
                count [tid + 1] += LAGraph_MIN (FASTSV_SAMPLES, deg) ;
            }
        }

        //----------------------------------------------------------------------
        // count = cumsum (count)
        //----------------------------------------------------------------------

        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            count [tid + 1] += count [tid] ;
        }

        //----------------------------------------------------------------------
        // construct T
        //----------------------------------------------------------------------

        // T (i,:) consists of the first FASTSV_SAMPLES of S (i,:).
        // todo: this could be done by GxB_Select, using a new operator. Need
        // to define a set of GxB_SelectOp operators that would allow for this.
        // Note that Tx is not modified. Only Tp and Tj are constructed.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            GrB_Index p = count [tid] ;
            Tp [range [tid]] = p ;
            for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                // construct T (i,:) from the first entries in S (i,:)
                for (int64_t j = 0 ;
                    j < FASTSV_SAMPLES && Sp [i] + j < Sp [i + 1] ; j++)
                {
                    Tj [p++] = Sj [Sp [i] + j] ;
                }
                Tp [i + 1] = p ;
            }
        }

        //----------------------------------------------------------------------
        // import the result into the GrB_Matrix T
        //----------------------------------------------------------------------

        // Note that Tx is unmodified.
        // in SuiteSparse:GraphBLAS v5, sizes are in bytes, not entries
        GrB_Index Tp_siz = Tp_size ;
        GrB_Index Tj_siz = Tj_size ;
        GrB_Index Tx_siz = Tx_size ;
        GrB_Index t_nvals = Tp [nrows] ;    // note: not used below
        GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols,
            &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz,
            true, // T is iso
            S_jumbled, NULL)) ;

        //----------------------------------------------------------------------
        // find the connected components of T
        //----------------------------------------------------------------------

        // TODO: this is identical to the final phase below. Make it a function
        bool diff = true ;
        while (diff)
        {
            // hooking & shortcutting
            // mngp = min (mngp, T*gp) using the MIN_SECOND semiring
            GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT64,
                GrB_MIN_SECOND_SEMIRING_UINT64, T, gp, NULL)) ;
            if (!V_is_identity)
            {
                // f = min (f, C*mngp) where C is C(i,j) = true if i=V(j)
                LAGraph_TRY (Reduce_assign (f, mngp, C, &Cp, &V, &Cx, msg)) ;
            }
            // f = min (f, mngp, gp)
            GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT64, GrB_MIN_UINT64,
                mngp, gp, NULL)) ;
            // calculate grandparent: gp_new = f (f)
            GrB_TRY (GrB_Vector_extractTuples (NULL, V, &n, f)) ;
            GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, V, n, NULL)) ;
            V_is_identity = false ;
            // terminate if gp and gp_new are the same
            GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT64, gp_new,
                gp, NULL)) ;
            GrB_TRY (GrB_reduce (&diff, NULL, GrB_LOR_MONOID_BOOL, mod,
                NULL)) ;
            // swap gp and gp_new
            GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
        }

        //----------------------------------------------------------------------
        // estimate the largest connected component
        //----------------------------------------------------------------------

        // key is set to the representative node of the largest connected
        // component. Since sampling is used, this is an estimate
        ht_init (ht_key, ht_val) ;
        ht_sample (V, n, HASH_SAMPLES, ht_key, ht_val, &seed) ;
        int64_t key = ht_most_frequent (ht_key, ht_val) ;
        // todo: what if key is returned as -1? Then T below is invalid.

        //----------------------------------------------------------------------
        // collapse the largest connected component
        //----------------------------------------------------------------------

        // All edges in, or to, the largest connected component is removed from
        // T. Next, if the row T(i,:) has enough space, and node i is not in
        // the largest connected component, a single edge T(i,key) = true is
        // added.
        int64_t t_nonempty = -1 ;       // note: not used below
        bool T_jumbled = false, T_iso = true ;

        // export T; Tp/Tj are rebuilt in place below
        GrB_TRY (GxB_Matrix_export_CSR (&T, &type, &nrows, &ncols, &Tp, &Tj,
            &Tx, &Tp_siz, &Tj_siz, &Tx_siz,
            &T_iso, &T_jumbled, NULL)) ;

        // FIXME: This parallel loop is badly load balanced. Each thread
        // operates on the same number of rows of S, regardless of how many
        // entries appear in each set of rows. It uses one thread per task,
        // statically scheduled.
        #pragma omp parallel for num_threads(nthreads) schedule(static) \
            reduction(||:T_jumbled)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            GrB_Index ptr = Sp [range [tid]] ;
            // thread tid scans S (range [tid]:range [tid+1]-1,:),
            // and constructs T(i,:) for all rows in this range.
            bool my_T_jumbled = false ;
            for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                int64_t pv = V [i] ; // representative (parent) of node i
                Tp [i] = ptr ;       // start the construction of T(i,:)
                // T(i,:) is empty if pv == key
                if (pv != key)
                {
                    // scan S(i,:)
                    for (GrB_Index p = Sp [i] ; p < Sp [i+1] ; p++)
                    {
                        // get S(i,j)
                        int64_t j = Sj [p] ;
                        if (V [j] != key)
                        {
                            // add the entry T(i,j) to T, but skip it if
                            // V [j] is equal to key
                            Tj [ptr++] = j ;
                        }
                    }
                    // add the entry T(i,key) if there is room for it in T(i,:)
                    if (ptr - Tp [i] < Sp [i+1] - Sp [i])
                    {
                        Tj [ptr++] = key ;
                        // this step can cause T to become jumbled
                        my_T_jumbled = true ;
                    }
                }
            }
            T_jumbled = T_jumbled || my_T_jumbled ;
            // count the number of entries inserted into T by this thread
            count [tid] = ptr - Tp [range [tid]] ;
        }

        // Compact empty space out of Tj not filled in from the above phase.
        // This is a lot of work and should be done in parallel.
        GrB_Index offset = 0 ;
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            memcpy (Tj + offset, Tj + Tp [range [tid]],
                sizeof (GrB_Index) * count [tid]) ;
            offset += count [tid] ;
            count [tid] = offset - count [tid] ;
        }

        // Compact empty space out of Tp
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            GrB_Index ptr = Tp [range [tid]] ;
            for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                Tp [i] -= ptr - count [tid] ;
            }
        }

        // finalize T
        Tp [n] = offset ;

        // free workspace
        LAGraph_Free ((void **) &count) ;
        LAGraph_Free ((void **) &range) ;

        // import S (unchanged since last export)
        GrB_TRY (GxB_Matrix_import_CSR (&S, type, nrows, ncols,
            &Sp, &Sj, &Sx, Sp_size, Sj_size, Sx_size,
            S_iso, S_jumbled, NULL)) ;

        // import T for the final phase
        GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols,
            &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz,
            T_iso, T_jumbled, NULL)) ;

        // restore G->A
        G->A = S ;
    }
    else
    {
        // no sampling; the final phase operates on the whole graph
        T = S ;
    }

    //--------------------------------------------------------------------------
    // final phase
    //--------------------------------------------------------------------------

    GrB_TRY (GrB_Matrix_nvals (&nnz, T)) ;
    bool diff = true ;
    while (diff && nnz > 0)
    {
        // hooking & shortcutting
        // mngp = min (mngp, T*gp) using the MIN_SECOND semiring
        GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT64,
            GrB_MIN_SECOND_SEMIRING_UINT64, T, gp, NULL)) ;
        if (!V_is_identity)
        {
            // f = min (f, C*mngp) where C is C(i,j) = true if i=V(j)
            GrB_TRY (Reduce_assign (f, mngp, C, &Cp, &V, &Cx, msg)) ;
            V_is_identity = false ;
        }
        // f = min (f, mngp, gp)
        GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT64, GrB_MIN_UINT64,
            mngp, gp, NULL)) ;
        // calculate grandparent: gp_new = f (f)
        GrB_TRY (GrB_Vector_extractTuples (NULL, V, &n, f)) ;
        GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, V, n, NULL)) ;
        // terminate if gp and gp_new are the same
        GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT64, gp_new, gp,
            NULL)) ;
        GrB_TRY (GrB_reduce (&diff, NULL, GrB_LOR_MONOID_BOOL, mod, NULL)) ;
        // swap gp and gp_new
        GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    // ownership of f transfers to the caller as the component vector
    (*component) = f ;
    f = NULL ;
    if (sampling)
    {
        GrB_free (&T) ;
    }
    LAGraph_FREE_ALL ;
    return (0) ;
#endif
}
|
ParallelHashMap.h | /**
* @file ParallelHashMap.h
* @brief A thread-safe hash map supporting insertion and lookup operations
* @details The parallel hash map is built on top of a fixed-sized hash map
* object and features OpenMP concurrency structures. The underlying
* fixed-sized hash map handles collisions with chaining.
* @date June 6, 2015
* @author Geoffrey Gunow, MIT, Course 22 (geogunow@mit.edu)
*/
#ifndef __PARALLEL_HASH_MAP__
#define __PARALLEL_HASH_MAP__
#include<iostream>
#include<stdexcept>
#include<functional>
#ifdef OPENMP
#include<omp.h>
#endif
/**
* @class FixedHashMap ParallelHashMap.h "src/ParallelHashMap.h"
* @brief A fixed-size hash map supporting insertion and lookup operations
* @details The FixedHashMap class supports insertion and lookup operations
* but not deletion as deletion is not needed in the OpenMOC application.
* This hash table uses chaining for collisions and does not incorporate
* concurrency objects except for tracking the number of entries in the
* table for which an atomic increment is used. This hash table is not
* thread safe but is used as a building block for the ParallelHashMap
* class. This table guarantees O(1) insertions and lookups on average.
*/
template <class K, class V>
class FixedHashMap
{
    /* singly-linked chain node: one key/value pair per node */
    struct node
    {
        node(K k_in, V v_in) : next(NULL), key(k_in), value(v_in){}
        K key;
        V value;
        node *next;     /* next node in this bucket's chain, or NULL */
    };

    private:
        size_t _M;          /* table size */
        size_t _N;          /* number of elements present in table */
        node ** _buckets;   /* buckets of values stored in nodes */

    public:
        FixedHashMap(size_t M = 64);
        virtual ~FixedHashMap();
        bool contains(K key);
        V& at(K key);
        void insert(K key, V value);
        int insert_and_get_count(K key, V value);
        size_t size();
        size_t bucket_count();
        K* keys();
        V* values();
        void clear();
        void print_buckets();
};
/**
* @class ParallelHashMap ParallelHashMap.h "src/ParallelHashMap.h"
* @brief A thread-safe hash map supporting insertion and lookup operations
* @details The ParallelHashMap class is built on top of the FixedHashMap
* class, supporting insertion and lookup operations but not deletion as
* deletion is not needed in the OpenMOC application. This hash table uses
* chaining for collisions, as defined in FixedHashMap. It offers lock
* free lookups in O(1) time on average and fine-grained locking for
* insertions in O(1) time on average as well. Resizing is conducted
* periodically during inserts, although the starting table size can be
* chosen to limit the number of resizing operations.
*/
template <class K, class V>
class ParallelHashMap
{
    /* padded pointer to hash table to avoid false sharing: the pointer is
       surrounded by eight longs (64 bytes, a full cache line) on each side
       so adjacent announce slots never share a cache line */
    struct paddedPointer
    {
        volatile long pad_L1;
        volatile long pad_L2;
        volatile long pad_L3;
        volatile long pad_L4;
        volatile long pad_L5;
        volatile long pad_L6;   /* BUG FIX: was missing, leaving only 56 bytes
                                   of leading padding */
        volatile long pad_L7;
        volatile long pad_L8;
        FixedHashMap<K,V>* volatile value;
        volatile long pad_R1;
        volatile long pad_R2;
        volatile long pad_R3;
        volatile long pad_R4;
        volatile long pad_R5;
        volatile long pad_R6;
        volatile long pad_R7;
        volatile long pad_R8;
    };

    private:
        FixedHashMap<K,V> *_table;  /* underlying fixed-size hash map */
        paddedPointer *_announce;   /* one announce slot per thread */
        size_t _num_threads;
        size_t _N;
#ifdef OPENMP
        omp_lock_t * _locks;        /* fine-grained locks for insertion */
        size_t _num_locks;
#endif
        void resize();

    public:
        ParallelHashMap(size_t M = 64, size_t L = 64);
        virtual ~ParallelHashMap();
        bool contains(K key);
        V& at(K key);
        void insert(K key, V value);
        int insert_and_get_count(K key, V value);
        size_t size();
        size_t bucket_count();
        size_t num_locks();
        K* keys();
        V* values();
        void clear();
        void print_buckets();
};
/**
* @brief Constructor initializes fixed-size table of buckets filled with empty
* linked lists.
* @details The constructor initializes a fixed-size hash map with the size
* as an input parameter. If no size is given the default size (64)
* is used. Buckets are filled with empty linked lists presented as
* NULL pointers.
* @param M size of fixed hash map
*/
/**
 * @brief Constructor allocates a fixed-size table of empty buckets.
 * @details The requested size is rounded up to the nearest power of two so
 *          that the fast bitwise modulus used by the lookup routines is
 *          valid. Buckets start as NULL pointers (empty chains).
 * @param M requested size of the fixed hash map (default 64)
 */
template <class K, class V>
FixedHashMap<K,V>::FixedHashMap(size_t M)
{
    /* round M up to the next power of two when it is not one already,
       using the classic bit-smearing trick */
    if ((M & (M-1)) != 0)
    {
        M--;
        for (size_t shift = 1; shift < 8 * sizeof(size_t); shift *= 2)
            M |= M >> shift;
        M++;
    }

    /* allocate the bucket array; () value-initializes every head to NULL */
    _M = M;
    _N = 0;
    _buckets = new node*[_M]();
}
/**
* @brief Destructor deletes all nodes in the linked lists associated with each
* bucket in the fixed-size table and their pointers.
*/
/**
 * @brief Destructor frees every chained node and then the bucket array.
 */
template <class K, class V>
FixedHashMap<K,V>::~FixedHashMap()
{
    /* walk each bucket's chain, deleting nodes one by one */
    for (size_t b = 0; b < _M; b++)
    {
        node *curr = _buckets[b];
        while (curr != NULL)
        {
            node *doomed = curr;
            curr = curr->next;
            delete doomed;
        }
    }

    /* release the (now dangling) bucket head array itself */
    delete[] _buckets;
}
/**
* @brief Determine whether the fixed-size table contains a given key
* @details The linked list in the bucket associated with the key is searched
* to determine whether the key is present.
* @param key key to be searched
* @return boolean value referring to whether the key is contained in the map
*/
/**
 * @brief Determine whether the fixed-size table contains a given key.
 * @details The chain in the bucket the key hashes to is scanned linearly.
 * @param key key to be searched
 * @return true if the key is present, false otherwise
 */
template <class K, class V>
bool FixedHashMap<K,V>::contains(K key)
{
    /* fast modulus: _M is always a power of two */
    size_t b = std::hash<K>()(key) & (_M-1);

    /* scan the chain in bucket b */
    for (node *curr = _buckets[b]; curr != NULL; curr = curr->next)
    {
        if (curr->key == key)
            return true;
    }
    return false;
}
/**
* @brief Determine the value associated with a given key in the fixed-size
* table.
* @details The linked list in the bucket associated with the key is searched
* and once the key is found, the corresponding value is returned.
* An exception is thrown if the key is not present in the map.
* @param key key whose corresponding value is desired
* @return value associated with the given key
*/
/**
 * @brief Look up the value associated with a given key.
 * @details The chain in the key's bucket is scanned; the matching node's
 *          value is returned by reference.
 * @param key key whose corresponding value is desired
 * @return reference to the value associated with the given key
 * @throws std::out_of_range if the key is not present in the map
 */
template <class K, class V>
V& FixedHashMap<K,V>::at(K key)
{
    /* fast modulus: _M is always a power of two */
    size_t b = std::hash<K>()(key) & (_M-1);

    /* scan the chain in bucket b and return the value on a key match */
    for (node *curr = _buckets[b]; curr != NULL; curr = curr->next)
    {
        if (curr->key == key)
            return curr->value;
    }

    /* the whole bucket was searched without finding the key */
    throw std::out_of_range("Key not present in map");
}
/**
* @brief Inserts a key/value pair into the fixed-size table.
* @details The specified key value pair is inserted into the fixed-size table.
* If the key already exists in the table, the pair is not inserted
* and the function returns.
* @param key key of the key/value pair to be inserted
* @param value value of the key/value pair to be inserted
*/
/**
 * @brief Inserts a key/value pair into the fixed-size table.
 * @details The pair is appended to the chain of the bucket the key hashes
 *          to. If the key already exists the table is left unchanged.
 * @param key key of the key/value pair to be inserted
 * @param value value of the key/value pair to be inserted
 */
template <class K, class V>
void FixedHashMap<K,V>::insert(K key, V value)
{
    /* fast modulus: _M is always a power of two */
    size_t b = std::hash<K>()(key) & (_M-1);

    /* duplicate keys are silently ignored */
    if (contains(key))
        return;

    /* walk to the tail of bucket b's chain and append a new node there */
    node **link = &_buckets[b];
    while (*link != NULL)
        link = &(*link)->next;
    *link = new node(key, value);

    /* atomically bump the element count */
#pragma omp atomic
    _N++;
}
/**
* @brief Inserts a key/value pair into the fixed-size table and returns the
* order number with which it was inserted.
* @details The specified key value pair is inserted into the fixed-size table.
* If the key already exists in the table, the pair is not inserted
* and the function returns -1.
* @param key key of the key/value pair to be inserted
* @param value value of the key/value pair to be inserted
* @return order number in which key/value pair was inserted, -1 is returned if
* key was already present in map.
*/
/**
 * @brief Inserts a key/value pair and reports its insertion order number.
 * @details The pair is appended to the chain of the bucket the key hashes
 *          to. If the key already exists nothing is inserted and -1 is
 *          returned.
 * @param key key of the key/value pair to be inserted
 * @param value value of the key/value pair to be inserted
 * @return zero-based order in which the pair was inserted, or -1 if the
 *         key was already present
 */
template <class K, class V>
int FixedHashMap<K,V>::insert_and_get_count(K key, V value)
{
    /* fast modulus: _M is always a power of two */
    size_t b = std::hash<K>()(key) & (_M-1);

    /* duplicate keys are rejected with the -1 sentinel */
    if (contains(key))
        return -1;

    /* walk to the tail of bucket b's chain and append a new node there */
    node **link = &_buckets[b];
    while (*link != NULL)
        link = &(*link)->next;
    *link = new node(key, value);

    /* fetch-and-increment the count inside a named critical section so
       the returned order number is unique across threads */
    size_t order;
#pragma omp critical (node_incr)
    {
        order = _N++;
    }
    return (int) order;
}
/**
* @brief Returns the number of key/value pairs in the fixed-size table
* @return number of key/value pairs in the map
*/
template <class K, class V>
size_t FixedHashMap<K,V>::size()
{
    /* _N is incremented atomically by the insert routines; a plain read
       here may be momentarily stale under concurrent insertion */
    return _N;
}
/**
* @brief Returns the number of buckets in the fixed-size table
* @return number of buckets in the map
*/
template <class K, class V>
size_t FixedHashMap<K,V>::bucket_count()
{
    /* _M is fixed at construction (rounded up to a power of two) */
    return _M;
}
/**
* @brief Returns an array of the keys in the fixed-size table
* @details All buckets are scanned in order to form a list of all keys
* present in the table and then the list is returned. WARNING: The user
* is responsible for freeing the allocated memory once the array is no
* longer needed.
* @return an array of keys in the map whose length is the number of key/value
* pairs in the table.
*/
/**
 * @brief Returns a newly allocated array of all keys in the table.
 * @details Buckets are scanned in order and each chain is walked front to
 *          back. WARNING: the caller owns the returned array and must
 *          delete[] it when done.
 * @return array of keys whose length equals the number of stored pairs
 */
template <class K, class V>
K* FixedHashMap<K,V>::keys()
{
    K *key_list = new K[_N];

    /* copy keys out in bucket order, chain order within each bucket */
    size_t pos = 0;
    for (size_t b = 0; b < _M; b++)
    {
        for (node *curr = _buckets[b]; curr != NULL; curr = curr->next)
            key_list[pos++] = curr->key;
    }
    return key_list;
}
/**
* @brief Returns an array of the values in the fixed-size table
* @details All buckets are scanned in order to form a list of all values
* present in the table and then the list is returned. WARNING: The user
* is responsible for freeing the allocated memory once the array is no
* longer needed.
* @return an array of values in the map whose length is the number of
* key/value pairs in the table.
*/
/**
 * @brief Returns a newly allocated array of all values in the table.
 * @details Buckets are scanned in order and each chain is walked front to
 *          back. WARNING: the caller owns the returned array and must
 *          delete[] it when done.
 * @return array of values whose length equals the number of stored pairs
 */
template <class K, class V>
V* FixedHashMap<K,V>::values()
{
    V *values = new V[_N];

    /* copy values out in bucket order, chain order within each bucket */
    size_t pos = 0;
    for (size_t b = 0; b < _M; b++)
    {
        for (node *curr = _buckets[b]; curr != NULL; curr = curr->next)
            values[pos++] = curr->value;
    }
    return values;
}
/**
 * @brief Clears all key/value pairs from the hash table.
 * @details Each bucket's linked list is walked and every node is deleted,
 *          the bucket head is reset to NULL in the same pass (the original
 *          code made a redundant second pass over all buckets), and the
 *          entry count is reset to zero.
 */
template <class K, class V>
void FixedHashMap<K,V>::clear()
{
    /* for each bucket, delete all nodes in its chain and reset its head */
    for (size_t i=0; i<_M; i++)
    {
        node *iter_node = _buckets[i];
        while (iter_node != NULL)
        {
            node *next_node = iter_node->next;
            delete iter_node;
            iter_node = next_node;
        }
        _buckets[i] = NULL;
    }

    /* reset the number of entries to zero */
    _N = 0;
}
/**
 * @brief Prints the contents of each bucket to the screen.
 * @details For every bucket index the stored list-head pointer is printed;
 *          an empty bucket is shown as "NULL".
 */
template <class K, class V>
void FixedHashMap<K,V>::print_buckets()
{
    for (size_t b = 0; b < _M; b++)
    {
        std::cout << b << " -> ";
        if (_buckets[b] == NULL)
            std::cout << "NULL" << std::endl;
        else
            std::cout << _buckets[b] << std::endl;
    }
}
/**
 * @brief Constructor generates initial underlying table as a fixed-sized
 *        hash map and initializes concurrency structures.
 * @param M number of buckets in the underlying fixed-size table
 * @param L number of OpenMP locks striped across the buckets (only used
 *          when compiled with OPENMP)
 */
template <class K, class V>
ParallelHashMap<K,V>::ParallelHashMap(size_t M, size_t L)
{
    /* allocate underlying fixed-size table */
    _table = new FixedHashMap<K,V>(M);

    /* get number of threads and create concurrency structures */
    _num_threads = 1;
    /* BUG FIX: _num_locks was left uninitialized in non-OpenMP builds,
       making num_locks() return an indeterminate value (UB) */
    _num_locks = 0;
    #ifdef OPENMP
    _num_threads = omp_get_max_threads();
    _num_locks = L;
    _locks = new omp_lock_t[_num_locks];
    for (size_t i=0; i<_num_locks; i++)
        omp_init_lock(&_locks[i]);
    #endif

    /* one padded announcement slot per thread (padding avoids false sharing) */
    _announce = new paddedPointer[_num_threads];
}
/**
 * @brief Destructor frees memory associated with fixed-sized hash map and
 *        concurrency structures.
 * @details BUG FIX: each OpenMP lock is now destroyed with omp_destroy_lock
 *          before the lock array is freed; the original code leaked the
 *          lock resources by only calling delete[].
 */
template <class K, class V>
ParallelHashMap<K,V>::~ParallelHashMap()
{
    delete _table;
    #ifdef OPENMP
    /* release OpenMP lock resources before freeing the array */
    for (size_t i=0; i<_num_locks; i++)
        omp_destroy_lock(&_locks[i]);
    delete[] _locks;
    #endif
    delete[] _announce;
}
/**
 * @brief Determine whether the parallel hash map contains a given key
 * @details First the thread accessing the table announces its presence and
 *          which table it is reading. Then the linked list in the bucket
 *          associated with the key is searched without setting any locks
 *          to determine whether the key is present. When the thread has
 *          finished accessing the table, the announcement is reset to NULL.
 *          The announcement ensures that the data in the map is not freed
 *          during a resize until all threads have finished accessing the map.
 * @param key key to be searched
 * @return boolean value referring to whether the key is contained in the map
 */
template <class K, class V>
bool ParallelHashMap<K,V>::contains(K key)
{
/* get thread ID (0 in non-OpenMP builds) */
size_t tid = 0;
#ifdef OPENMP
tid = omp_get_thread_num();
#endif
/* read-announce-recheck: if a concurrent resize swaps _table between the
   read and the announcement, the recheck fails and we retry, so the
   announced pointer always refers to the live table.
   NOTE(review): this assumes pointer loads/stores are atomic and not
   reordered — no explicit fences/atomics are used; confirm on target. */
FixedHashMap<K,V> *table_ptr;
do{
table_ptr = _table;
_announce[tid].value = table_ptr;
} while (table_ptr != _table);
/* lock-free read: search the announced table for the key */
bool present = table_ptr->contains(key);
/* reset table announcement to not searching, allowing a resizer to free
   the old table */
_announce[tid].value = NULL;
return present;
}
/**
 * @brief Determine the value associated with a given key.
 * @details This function follows the same algorithm as <contains> except that
 *          the value associated with the searched key is returned.
 *          First the thread accessing the table announces its presence and
 *          which table it is reading. Then the linked list in the bucket
 *          associated with the key is searched without setting any locks
 *          to determine the associated value. An exception is thrown if the
 *          key is not found. When the thread has finished accessing the table,
 *          the announcement is reset to NULL. The announcement ensures that
 *          the data in the map is not freed during a resize until all threads
 *          have finished accessing the map.
 * @param key key to be searched
 * @return value associated with the key
 */
template <class K, class V>
V& ParallelHashMap<K,V>::at(K key)
{
/* get thread ID (0 in non-OpenMP builds) */
size_t tid = 0;
#ifdef OPENMP
tid = omp_get_thread_num();
#endif
/* read-announce-recheck loop; see contains() for the protocol rationale */
FixedHashMap<K,V> *table_ptr;
do{
table_ptr = _table;
_announce[tid].value = table_ptr;
} while (table_ptr != _table);
/* get value associated with the key in the underlying table */
V& value = table_ptr->at(key);
/* reset table announcement to not searching */
_announce[tid].value = NULL;
/* NOTE(review): the returned reference points into the table; it can
   dangle if a concurrent resize frees the old table after this returns */
return value;
}
/**
 * @brief Insert a given key/value pair into the parallel hash map.
 * @details First the underlying table is checked to determine if a resize
 *          should be conducted. Then, the table is checked to see if it
 *          already contains the key. If so, the key/value pair is not inserted
 *          and the function returns. Otherwise, the lock of the associated
 *          bucket is acquired and the key/value pair is added to the bucket.
 * @param key key of the key/value pair to be inserted
 * @param value value of the key/value pair to be inserted
 */
template <class K, class V>
void ParallelHashMap<K,V>::insert(K key, V value)
{
/* check if resize needed: resize once the load factor exceeds 1/2 */
if (2*_table->size() > _table->bucket_count())
resize();
/* check to see if key is already contained in the table.
   NOTE(review): this check happens before the bucket lock is taken, so two
   threads may both pass it and insert the same key concurrently — confirm
   that duplicate keys are tolerated by FixedHashMap::insert */
if (contains(key))
return;
/* get lock hash: bucket index reduced modulo the number of locks, so each
   lock guards a stripe of buckets.
   NOTE(review): the & (bucket_count - 1) mask assumes the bucket count is a
   power of two — confirm the FixedHashMap constructor enforces this */
#ifdef OPENMP
size_t lock_hash = (std::hash<K>()(key) & (_table->bucket_count() - 1))
% _num_locks;
/* acquire lock */
omp_set_lock(&_locks[lock_hash]);
#endif
/* insert value */
_table->insert(key, value);
/* release lock */
#ifdef OPENMP
omp_unset_lock(&_locks[lock_hash]);
#endif
}
/**
 * @brief Insert a given key/value pair into the parallel hash map and return
 *        the order number.
 * @details First the underlying table is checked to determine if a resize
 *          should be conducted. Then, the table is checked to see if it
 *          already contains the key. If so, the key/value pair is not inserted
 *          and the function returns. Otherwise, the lock of the associated
 *          bucket is acquired and the key/value pair is added to the bucket.
 * @param key key of the key/value pair to be inserted
 * @param value value of the key/value pair to be inserted
 * @return order number in which the key/value pair was inserted, -1 if it
 *         already exists
 */
template <class K, class V>
int ParallelHashMap<K,V>::insert_and_get_count(K key, V value)
{
/* check if resize needed: resize once the load factor exceeds 1/2 */
if (2*_table->size() > _table->bucket_count())
resize();
/* check to see if key is already contained in the table.
   NOTE(review): checked before the bucket lock is taken — a concurrent
   inserter of the same key may race past this check; see insert() */
if (contains(key))
return -1;
/* get lock hash: bucket index (assumes power-of-two bucket count) reduced
   modulo the number of lock stripes */
#ifdef OPENMP
size_t lock_hash = (std::hash<K>()(key) & (_table->bucket_count() - 1))
% _num_locks;
/* acquire lock */
omp_set_lock(&_locks[lock_hash]);
#endif
/* insert value and capture the insertion order number */
int N =_table->insert_and_get_count(key, value);
/* release lock */
#ifdef OPENMP
omp_unset_lock(&_locks[lock_hash]);
#endif
return N;
}
/**
 * @brief Resizes the underlying table to twice its current capacity.
 * @details In a thread-safe manner, this procedure resizes the underlying
 * FixedHashMap table to twice its current capacity using locks and the
 * announce array. First, all locks are set in order to block inserts and
 * prevent deadlock. A new table is allocated of twice the size and filled with
 * all key/value pairs from the old table, then the pointer is switched to the
 * new table and locks are released. Finally the memory needs to be freed.
 * To prevent threads currently reading the table from encountering
 * segmentation faults, the resizing thread waits for the announce array
 * to be free of references to the old table before freeing the memory.
 */
template <class K, class V>
void ParallelHashMap<K,V>::resize()
{
/* acquire ALL locks, always in index order so two concurrent resizers
   cannot deadlock against each other */
#ifdef OPENMP
for (size_t i=0; i<_num_locks; i++)
omp_set_lock(&_locks[i]);
#endif
/* recheck if resize needed: another thread may have already resized while
   this thread was waiting for the locks.
   NOTE(review): the trigger in insert() is 2*size > bucket_count, so the
   exact complement here would be <=; with < an equal-load call still
   resizes (harmless extra growth, but confirm intent) */
if (2*_table->size() < _table->bucket_count())
{
/* release locks */
#ifdef OPENMP
for (size_t i=0; i<_num_locks; i++)
omp_unset_lock(&_locks[i]);
#endif
return;
}
/* allocate new hash map of double the size */
FixedHashMap<K,V> *new_map =
new FixedHashMap<K,V>(2*_table->bucket_count());
/* get keys, values, and number of elements (both arrays are owned here
   and freed below) */
K *key_list = _table->keys();
V *value_list = _table->values();
/* insert key/value pairs into new hash map */
for (size_t i=0; i<_table->size(); i++)
new_map->insert(key_list[i], value_list[i]);
/* save pointer of old table */
FixedHashMap<K,V> *old_table = _table;
/* reassign pointer: readers starting after this point see the new table */
_table = new_map;
/* release all locks */
#ifdef OPENMP
for (size_t i=0; i<_num_locks; i++)
omp_unset_lock(&_locks[i]);
#endif
/* delete key and value list */
delete[] key_list;
delete[] value_list;
/* busy-spin until no reader still announces the old table; readers publish
   the table pointer they use in _announce before touching it */
for (size_t i=0; i<_num_threads; i++)
while (_announce[i].value == old_table) {};
/* free memory associated with old table */
delete old_table;
}
/**
 * @brief Number of key/value pairs in the underlying table.
 * @return number of key/value pairs in the map
 */
template <class K, class V>
size_t ParallelHashMap<K,V>::size()
{
    /* delegate straight to the underlying fixed-size table */
    return _table->size();
}
/**
 * @brief Number of buckets in the underlying table.
 * @return number of buckets in the map
 */
template <class K, class V>
size_t ParallelHashMap<K,V>::bucket_count()
{
    /* delegate straight to the underlying fixed-size table */
    return _table->bucket_count();
}
/**
 * @brief Number of OpenMP locks striped across the buckets.
 * @return number of locks in the map
 */
template <class K, class V>
size_t ParallelHashMap<K,V>::num_locks()
{
    size_t lock_count = _num_locks;
    return lock_count;
}
/**
 * @brief Returns an array of the keys in the underlying table
 * @details All buckets are scanned in order to form a list of all keys
 *          present in the table and then the list is returned. Threads
 *          announce their presence to ensure table memory is not freed
 *          during access. WARNING: The user is responsible for freeing the
 *          allocated memory once the array is no longer needed.
 * @return an array of keys in the map whose length is the number of key/value
 *         pairs in the table.
 */
template <class K, class V>
K* ParallelHashMap<K,V>::keys()
{
/* get thread ID (0 in non-OpenMP builds) */
size_t tid = 0;
#ifdef OPENMP
tid = omp_get_thread_num();
#endif
/* read-announce-recheck loop; see contains() for the protocol rationale */
FixedHashMap<K,V> *table_ptr;
do{
table_ptr = _table;
_announce[tid].value = table_ptr;
} while (table_ptr != _table);
/* get key list (heap-allocated; caller must delete[]) */
K* key_list = table_ptr->keys();
/* reset table announcement to not searching */
_announce[tid].value = NULL;
return key_list;
}
/**
 * @brief Returns an array of the values in the underlying table
 * @details All buckets are scanned in order to form a list of all values
 *          present in the table and then the list is returned. Threads
 *          announce their presence to ensure table memory is not freed
 *          during access. WARNING: The user is responsible for freeing the
 *          allocated memory once the array is no longer needed.
 * @return an array of values in the map whose length is the number of key/value
 *         pairs in the table.
 */
template <class K, class V>
V* ParallelHashMap<K,V>::values()
{
/* get thread ID (0 in non-OpenMP builds) */
size_t tid = 0;
#ifdef OPENMP
tid = omp_get_thread_num();
#endif
/* read-announce-recheck loop; see contains() for the protocol rationale */
FixedHashMap<K,V> *table_ptr;
do{
table_ptr = _table;
_announce[tid].value = table_ptr;
} while (table_ptr != _table);
/* get value list (heap-allocated; caller must delete[]) */
V* value_list = table_ptr->values();
/* reset table announcement to not searching */
_announce[tid].value = NULL;
return value_list;
}
/**
 * @brief Clears all key/value pairs from the hash table.
 * @details All bucket locks are acquired in index order to block concurrent
 *          inserts, the underlying fixed table is cleared, and the locks
 *          are released again.
 *          BUG FIX: the original implementation never released the locks,
 *          so any subsequent insert(), resize() or clear() would deadlock.
 */
template <class K, class V>
void ParallelHashMap<K,V>::clear()
{
    /* acquire all locks in order to block concurrent writers */
    #ifdef OPENMP
    for (size_t i=0; i<_num_locks; i++)
        omp_set_lock(&_locks[i]);
    #endif

    /* clear underlying fixed table */
    _table->clear();

    /* release all locks */
    #ifdef OPENMP
    for (size_t i=0; i<_num_locks; i++)
        omp_unset_lock(&_locks[i]);
    #endif
}
/**
 * @brief Prints the contents of each bucket to the screen
 * @details All buckets are scanned and the contents of the buckets are
 *          printed, which are pointers to linked lists. If the pointer is NULL
 *          suggesting that the linked list is empty, NULL is printed to the
 *          screen. Threads announce their presence to ensure table memory is
 *          not freed during access.
 */
template <class K, class V>
void ParallelHashMap<K,V>::print_buckets()
{
/* get thread ID (0 in non-OpenMP builds) */
size_t tid = 0;
#ifdef OPENMP
tid = omp_get_thread_num();
#endif
/* read-announce-recheck loop; see contains() for the protocol rationale */
FixedHashMap<K,V> *table_ptr;
do{
table_ptr = _table;
_announce[tid].value = table_ptr;
} while (table_ptr != _table);
/* print buckets of the announced (stable) table */
table_ptr->print_buckets();
/* reset table announcement to not searching */
_announce[tid].value = NULL;
}
#endif
|
operations.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file operations.h
* \brief Quantum operation functions
*/
#ifndef OPERATIONS_H_
#define OPERATIONS_H_
namespace qpp {
/**
* \brief Applies the controlled-gate \a A to the part \a target of the
* multi-partite state vector or density matrix \a state
* \see qpp::Gates::CTRL()
*
* \note The dimension of the gate \a A must match the dimension of \a target.
* Also, all control subsystems in \a ctrl must have the same dimension.
*
* \param state Eigen expression
* \param A Eigen expression
* \param ctrl Control subsystem indexes
* \param target Subsystem indexes where the gate \a A is applied
* \param dims Dimensions of the multi-partite system
* \param shift Performs the control as if the \a ctrl qudit states were
* \f$ X\f$-incremented component-wise by \a shift. If non-empty (default), the
* size of \a shift must be the same as the size of \a ctrl.
* \return CTRL-A gate applied to the part \a target of \a state
*/
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
applyCTRL(const Eigen::MatrixBase<Derived1>& state,
          const Eigen::MatrixBase<Derived2>& A, const std::vector<idx>& ctrl,
          const std::vector<idx>& target, const std::vector<idx>& dims,
          std::vector<idx> shift = {}) {
    const typename Eigen::MatrixBase<Derived1>::EvalReturnType& rstate =
        state.derived();
    const dyn_mat<typename Derived2::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS

    // check types
    if (!std::is_same<typename Derived1::Scalar,
                      typename Derived2::Scalar>::value)
        throw exception::TypeMismatch("qpp::applyCTRL()");

    // check zero sizes
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::applyCTRL()");

    // check zero sizes
    if (!internal::check_nonzero_size(rstate))
        throw exception::ZeroSize("qpp::applyCTRL()");

    // check zero sizes
    if (!internal::check_nonzero_size(target))
        throw exception::ZeroSize("qpp::applyCTRL()");

    // check square matrix for the gate
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::applyCTRL()");

    // check valid state and matching dimensions
    if (internal::check_cvector(rstate)) {
        if (!internal::check_dims_match_cvect(dims, state))
            throw exception::DimsMismatchCvector("qpp::applyCTRL()");
    } else if (internal::check_square_mat(rstate)) {
        if (!internal::check_dims_match_mat(dims, state))
            throw exception::DimsMismatchMatrix("qpp::applyCTRL()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::applyCTRL()");

    // check that ctrl subsystem is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(ctrl, dims))
        throw exception::SubsysMismatchDims("qpp::applyCTRL()");

    // check that all control subsystems have the same dimension
    // (d is the common control dimension; 1 when there are no controls)
    idx d = ctrl.size() > 0 ? dims[ctrl[0]] : 1;
    for (idx i = 1; i < ctrl.size(); ++i)
        if (dims[ctrl[i]] != d)
            throw exception::DimsNotEqual("qpp::applyCTRL()");

    // check that dimension is valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::applyCTRL()");

    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::applyCTRL()");

    // check that gate matches the dimensions of the target
    std::vector<idx> target_dims(target.size());
    for (idx i = 0; i < target.size(); ++i)
        target_dims[i] = dims[target[i]];
    if (!internal::check_dims_match_mat(target_dims, rA))
        throw exception::MatrixMismatchSubsys("qpp::applyCTRL()");

    std::vector<idx> ctrlgate = ctrl; // ctrl + gate subsystem vector
    ctrlgate.insert(std::end(ctrlgate), std::begin(target), std::end(target));
    std::sort(std::begin(ctrlgate), std::end(ctrlgate));

    // check that ctrl + gate subsystem is valid
    // with respect to local dimensions
    if (!internal::check_subsys_match_dims(ctrlgate, dims))
        throw exception::SubsysMismatchDims("qpp::applyCTRL()");

    // check shift
    if (!shift.empty() && (shift.size() != ctrl.size()))
        throw exception::SizeMismatch("qpp::applyCTRL()");
    if (!shift.empty())
        for (auto&& elem : shift)
            if (elem >= d)
                throw exception::OutOfRange("qpp::applyCTRL()");
    // END EXCEPTION CHECKS

    // default: no shift on any control
    if (shift.empty())
        shift = std::vector<idx>(ctrl.size(), 0);

    // construct the table of A^i and (A^dagger)^i
    // (at least 2 entries so index 1 = A itself exists even when d == 1,
    // i.e. the no-control case below can use Ai[1] unconditionally)
    std::vector<dyn_mat<typename Derived1::Scalar>> Ai;
    std::vector<dyn_mat<typename Derived1::Scalar>> Aidagger;
    for (idx i = 0; i < std::max(d, static_cast<idx>(2)); ++i) {
        Ai.emplace_back(powm(rA, i));
        Aidagger.emplace_back(powm(adjoint(rA), i));
    }

    idx D = static_cast<idx>(rstate.rows()); // total dimension
    idx n = dims.size();                     // total number of subsystems
    idx ctrlsize = ctrl.size();              // number of ctrl subsystem
    idx ctrlgatesize = ctrlgate.size();      // number of ctrl+gate subsystems
    idx targetsize = target.size(); // number of subsystems of the target
    // dimension of ctrl subsystem
    idx Dctrl = static_cast<idx>(std::llround(std::pow(d, ctrlsize)));
    idx DA = static_cast<idx>(rA.rows()); // dimension of gate subsystem

    // scratch multi-index dimension tables (maxn = max subsystem count)
    idx Cdims[maxn];          // local dimensions
    idx CdimsA[maxn];         // local dimensions
    idx CdimsCTRL[maxn];      // local dimensions
    idx CdimsCTRLA_bar[maxn]; // local dimensions

    // compute the complementary subsystem of ctrlgate w.r.t. dims
    std::vector<idx> ctrlgate_bar = complement(ctrlgate, n);
    // number of subsystems that are complementary to the ctrl+gate
    idx ctrlgate_barsize = ctrlgate_bar.size();

    idx DCTRLA_bar = 1; // dimension of the rest
    for (idx i = 0; i < ctrlgate_barsize; ++i)
        DCTRLA_bar *= dims[ctrlgate_bar[i]];

    for (idx k = 0; k < n; ++k)
        Cdims[k] = dims[k];
    for (idx k = 0; k < targetsize; ++k)
        CdimsA[k] = dims[target[k]];
    for (idx k = 0; k < ctrlsize; ++k)
        CdimsCTRL[k] = d;
    for (idx k = 0; k < ctrlgate_barsize; ++k)
        CdimsCTRLA_bar[k] = dims[ctrlgate_bar[k]];

    // worker, computes the coefficient and the index for the ket case
    // used in #pragma omp parallel for collapse
    // (captures by reference but writes only to its own locals, so it is
    // safe to call concurrently from multiple threads)
    auto coeff_idx_ket = [&](idx i_, idx m_, idx r_) noexcept
        ->std::pair<typename Derived1::Scalar, idx> {
        idx indx = 0;
        typename Derived1::Scalar coeff = 0;

        idx Cmidx[maxn];          // the total multi-index
        idx CmidxA[maxn];         // the gate part multi-index
        idx CmidxCTRLA_bar[maxn]; // the rest multi-index

        // compute the index

        // set the CTRL part (shift rotates the control value)
        for (idx k = 0; k < ctrlsize; ++k) {
            Cmidx[ctrl[k]] = (i_ + d - shift[k]) % d;
        }

        // set the rest
        internal::n2multiidx(r_, n - ctrlgatesize, CdimsCTRLA_bar,
                             CmidxCTRLA_bar);
        for (idx k = 0; k < n - ctrlgatesize; ++k) {
            Cmidx[ctrlgate_bar[k]] = CmidxCTRLA_bar[k];
        }

        // set the A part
        internal::n2multiidx(m_, targetsize, CdimsA, CmidxA);
        for (idx k = 0; k < targetsize; ++k) {
            Cmidx[target[k]] = CmidxA[k];
        }

        // we now got the total index
        indx = internal::multiidx2n(Cmidx, n, Cdims);

        // compute the coefficient: row m_ of A^i_ contracted with the
        // state amplitudes along the target subsystems
        for (idx n_ = 0; n_ < DA; ++n_) {
            internal::n2multiidx(n_, targetsize, CdimsA, CmidxA);
            for (idx k = 0; k < targetsize; ++k) {
                Cmidx[target[k]] = CmidxA[k];
            }
            coeff +=
                Ai[i_](m_, n_) * rstate(internal::multiidx2n(Cmidx, n, Cdims));
        }

        return std::make_pair(coeff, indx);
    }; /* end coeff_idx_ket */

    // worker, computes the coefficient and the index
    // for the density matrix case
    // used in #pragma omp parallel for collapse
    auto coeff_idx_rho = [&](idx i1_, idx m1_, idx r1_, idx i2_, idx m2_,
                             idx r2_) noexcept
        ->std::tuple<typename Derived1::Scalar, idx, idx> {
        idx idxrow = 0;
        idx idxcol = 0;
        typename Derived1::Scalar coeff = 0, lhs = 1, rhs = 1;

        idx Cmidxrow[maxn];          // the total row multi-index
        idx Cmidxcol[maxn];          // the total col multi-index
        idx CmidxArow[maxn];         // the gate part row multi-index
        idx CmidxAcol[maxn];         // the gate part col multi-index
        idx CmidxCTRLrow[maxn];      // the control row multi-index
        idx CmidxCTRLcol[maxn];      // the control col multi-index
        idx CmidxCTRLA_barrow[maxn]; // the rest row multi-index
        idx CmidxCTRLA_barcol[maxn]; // the rest col multi-index

        // compute the ket/bra indexes

        // set the CTRL part
        internal::n2multiidx(i1_, ctrlsize, CdimsCTRL, CmidxCTRLrow);
        internal::n2multiidx(i2_, ctrlsize, CdimsCTRL, CmidxCTRLcol);
        for (idx k = 0; k < ctrlsize; ++k) {
            Cmidxrow[ctrl[k]] = CmidxCTRLrow[k];
            Cmidxcol[ctrl[k]] = CmidxCTRLcol[k];
        }

        // set the rest
        internal::n2multiidx(r1_, n - ctrlgatesize, CdimsCTRLA_bar,
                             CmidxCTRLA_barrow);
        internal::n2multiidx(r2_, n - ctrlgatesize, CdimsCTRLA_bar,
                             CmidxCTRLA_barcol);
        for (idx k = 0; k < n - ctrlgatesize; ++k) {
            Cmidxrow[ctrlgate_bar[k]] = CmidxCTRLA_barrow[k];
            Cmidxcol[ctrlgate_bar[k]] = CmidxCTRLA_barcol[k];
        }

        // set the A part
        internal::n2multiidx(m1_, targetsize, CdimsA, CmidxArow);
        internal::n2multiidx(m2_, targetsize, CdimsA, CmidxAcol);
        for (idx k = 0; k < target.size(); ++k) {
            Cmidxrow[target[k]] = CmidxArow[k];
            Cmidxcol[target[k]] = CmidxAcol[k];
        }

        // we now got the total row/col indexes
        idxrow = internal::multiidx2n(Cmidxrow, n, Cdims);
        idxcol = internal::multiidx2n(Cmidxcol, n, Cdims);

        // check whether all CTRL row and col multi indexes are equal
        bool all_ctrl_rows_equal = true;
        bool all_ctrl_cols_equal = true;

        // sentinel: with no controls, index 1 selects A^1 (i.e. A) below,
        // so the gate is applied unconditionally
        idx first_ctrl_row, first_ctrl_col;
        if (ctrlsize > 0) {
            first_ctrl_row = (CmidxCTRLrow[0] + shift[0]) % d;
            first_ctrl_col = (CmidxCTRLcol[0] + shift[0]) % d;
        } else {
            first_ctrl_row = first_ctrl_col = 1;
        }

        for (idx k = 1; k < ctrlsize; ++k) {
            if ((CmidxCTRLrow[k] + shift[k]) % d != first_ctrl_row) {
                all_ctrl_rows_equal = false;
                break;
            }
        }
        for (idx k = 1; k < ctrlsize; ++k) {
            if ((CmidxCTRLcol[k] + shift[k]) % d != first_ctrl_col) {
                all_ctrl_cols_equal = false;
                break;
            }
        }

        // at least one control activated, compute the coefficient
        // rho' entry = sum over n1_, n2_ of lhs * rho(row, col) * rhs,
        // where lhs/rhs degenerate to the identity when the controls of
        // the row/col side do not all agree
        for (idx n1_ = 0; n1_ < DA; ++n1_) {
            internal::n2multiidx(n1_, targetsize, CdimsA, CmidxArow);
            for (idx k = 0; k < targetsize; ++k) {
                Cmidxrow[target[k]] = CmidxArow[k];
            }
            idx idxrowtmp = internal::multiidx2n(Cmidxrow, n, Cdims);

            if (all_ctrl_rows_equal) {
                lhs = Ai[first_ctrl_row](m1_, n1_);
            } else {
                lhs = (m1_ == n1_) ? 1 : 0; // identity matrix
            }

            for (idx n2_ = 0; n2_ < DA; ++n2_) {
                internal::n2multiidx(n2_, targetsize, CdimsA, CmidxAcol);
                for (idx k = 0; k < targetsize; ++k) {
                    Cmidxcol[target[k]] = CmidxAcol[k];
                }

                if (all_ctrl_cols_equal) {
                    rhs = Aidagger[first_ctrl_col](n2_, m2_);
                } else {
                    rhs = (n2_ == m2_) ? 1 : 0; // identity matrix
                }

                idx idxcoltmp = internal::multiidx2n(Cmidxcol, n, Cdims);

                coeff += lhs * rstate(idxrowtmp, idxcoltmp) * rhs;
            }
        }
        return std::make_tuple(coeff, idxrow, idxcol);
    }; /* end coeff_idx_rho */

    //************ ket ************//
    if (internal::check_cvector(rstate)) // we have a ket
    {
        // check that dims match state vector
        if (!internal::check_dims_match_cvect(dims, rstate))
            throw exception::DimsMismatchCvector("qpp::applyCTRL()");

        // trivial single-dimensional state: nothing to apply
        if (D == 1)
            return rstate;

        dyn_mat<typename Derived1::Scalar> result = rstate;

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // each (m, r) pair writes to distinct result entries, so the
        // collapsed parallel loop is race-free
        for (idx m = 0; m < DA; ++m)
            for (idx r = 0; r < DCTRLA_bar; ++r) {
                if (ctrlsize == 0) // no control
                {
                    result(coeff_idx_ket(1, m, r).second) =
                        coeff_idx_ket(1, m, r).first;
                } else
                    for (idx i = 0; i < d; ++i) {
                        result(coeff_idx_ket(i, m, r).second) =
                            coeff_idx_ket(i, m, r).first;
                    }
            }
        return result;
    }
    //************ density matrix ************//
    else // we have a density operator
    {
        // check that dims match state matrix
        if (!internal::check_dims_match_mat(dims, rstate))
            throw exception::DimsMismatchMatrix("qpp::applyCTRL()");

        // trivial single-dimensional state: nothing to apply
        if (D == 1)
            return rstate;

        dyn_mat<typename Derived1::Scalar> result = rstate;

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(4)
#endif // WITH_OPENMP_
        for (idx m1 = 0; m1 < DA; ++m1)
            for (idx r1 = 0; r1 < DCTRLA_bar; ++r1)
                for (idx m2 = 0; m2 < DA; ++m2)
                    for (idx r2 = 0; r2 < DCTRLA_bar; ++r2)
                        if (ctrlsize == 0) // no control
                        {
                            auto coeff_idxes =
                                coeff_idx_rho(1, m1, r1, 1, m2, r2);
                            result(std::get<1>(coeff_idxes),
                                   std::get<2>(coeff_idxes)) =
                                std::get<0>(coeff_idxes);
                        } else {
                            for (idx i1 = 0; i1 < Dctrl; ++i1)
                                for (idx i2 = 0; i2 < Dctrl; ++i2) {
                                    auto coeff_idxes =
                                        coeff_idx_rho(i1, m1, r1, i2, m2, r2);
                                    result(std::get<1>(coeff_idxes),
                                           std::get<2>(coeff_idxes)) =
                                        std::get<0>(coeff_idxes);
                                }
                        }
        return result;
    }
}
/**
 * \brief Applies the controlled-gate \a A to the part \a target of the
 * multi-partite state vector or density matrix \a state
 * \see qpp::Gates::CTRL()
 *
 * \note The dimension of the gate \a A must match the dimension of \a target.
 * Also, all control subsystems in \a ctrl must have the same dimension.
 *
 * \param state Eigen expression
 * \param A Eigen expression
 * \param ctrl Control subsystem indexes
 * \param target Subsystem indexes where the gate \a A is applied
 * \param d Subsystem dimensions
 * \param shift Performs the control as if the \a ctrl qudit states were
 * \f$ X\f$-incremented component-wise by \a shift. If non-empty (default), the
 * size of \a shift must be the same as the size of \a ctrl.
 * \return CTRL-A gate applied to the part \a target of \a state
 */
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
applyCTRL(const Eigen::MatrixBase<Derived1>& state,
          const Eigen::MatrixBase<Derived2>& A, const std::vector<idx>& ctrl,
          const std::vector<idx>& target, idx d = 2,
          const std::vector<idx>& shift = {}) {
    const typename Eigen::MatrixBase<Derived1>::EvalReturnType& rstate =
        state.derived();
    const dyn_mat<typename Derived1::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(rstate))
        throw exception::ZeroSize("qpp::applyCTRL()");
    if (d < 2)
        throw exception::DimsInvalid("qpp::applyCTRL()");
    // END EXCEPTION CHECKS

    // build a vector of equal local dimensions and delegate to the
    // general applyCTRL() overload
    idx num_subsys =
        internal::get_num_subsys(static_cast<idx>(rstate.rows()), d);
    std::vector<idx> local_dims(num_subsys, d);

    return applyCTRL(rstate, rA, ctrl, target, local_dims, shift);
}
/**
 * \brief Applies the gate \a A to the part \a target of the multi-partite state
 * vector or density matrix \a state
 *
 * \note The dimension of the gate \a A must match the dimension of \a target
 *
 * \param state Eigen expression
 * \param A Eigen expression
 * \param target Subsystem indexes where the gate \a A is applied
 * \param dims Dimensions of the multi-partite system
 * \return Gate \a A applied to the part \a target of \a state
 */
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
apply(const Eigen::MatrixBase<Derived1>& state,
      const Eigen::MatrixBase<Derived2>& A, const std::vector<idx>& target,
      const std::vector<idx>& dims) {
    const typename Eigen::MatrixBase<Derived1>::EvalReturnType& rstate =
        state.derived();
    const dyn_mat<typename Derived2::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS

    // scalar types of state and gate must agree
    if (!std::is_same<typename Derived1::Scalar,
                      typename Derived2::Scalar>::value)
        throw exception::TypeMismatch("qpp::apply()");
    // gate, state and target must all be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::apply()");
    if (!internal::check_nonzero_size(rstate))
        throw exception::ZeroSize("qpp::apply()");
    if (!internal::check_nonzero_size(target))
        throw exception::ZeroSize("qpp::apply()");
    // the gate must be a square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::apply()");
    // the dimension vector must be valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::apply()");
    // the target must fit inside dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::apply()");
    // the state must be a ket or a square (density) matrix matching dims
    if (internal::check_cvector(rstate)) {
        if (!internal::check_dims_match_cvect(dims, state))
            throw exception::DimsMismatchCvector("qpp::apply()");
    } else if (internal::check_square_mat(rstate)) {
        if (!internal::check_dims_match_mat(dims, state))
            throw exception::DimsMismatchMatrix("qpp::apply()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::apply()");
    // the gate dimension must equal the product of the target dimensions
    std::vector<idx> subsys_dims(target.size());
    for (idx i = 0; i < target.size(); ++i)
        subsys_dims[i] = dims[target[i]];
    if (!internal::check_dims_match_mat(subsys_dims, rA))
        throw exception::MatrixMismatchSubsys("qpp::apply()");
    // END EXCEPTION CHECKS

    // an uncontrolled gate is a CTRL gate with an empty control set; the
    // same delegation handles both kets and density matrices
    return applyCTRL(rstate, rA, {}, target, dims);
}
/**
 * \brief Applies the gate \a A to the part \a target of the multi-partite state
 * vector or density matrix \a state
 *
 * \note The dimension of the gate \a A must match the dimension of \a target
 *
 * \param state Eigen expression
 * \param A Eigen expression
 * \param target Subsystem indexes where the gate \a A is applied
 * \param d Subsystem dimensions
 * \return Gate \a A applied to the part \a target of \a state
 */
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
apply(const Eigen::MatrixBase<Derived1>& state,
      const Eigen::MatrixBase<Derived2>& A, const std::vector<idx>& target,
      idx d = 2) {
    const typename Eigen::MatrixBase<Derived1>::EvalReturnType& rstate =
        state.derived();
    const dyn_mat<typename Derived1::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(rstate))
        throw exception::ZeroSize("qpp::apply()");
    if (d < 2)
        throw exception::DimsInvalid("qpp::apply()");
    // END EXCEPTION CHECKS

    // build a vector of equal local dimensions and delegate to the
    // general apply() overload
    idx num_subsys =
        internal::get_num_subsys(static_cast<idx>(rstate.rows()), d);
    std::vector<idx> local_dims(num_subsys, d);

    return apply(rstate, rA, target, local_dims);
}
/**
 * \brief Applies the channel specified by the set of Kraus operators \a Ks to
 * the density matrix \a A
 *
 * \param A Eigen expression
 * \param Ks Set of Kraus operators
 * \return Output density matrix after the action of the channel
 */
template <typename Derived>
cmat apply(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks) {
    const cmat& rA = A.derived();

    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::apply()");
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::apply()");
    if (Ks.empty())
        throw exception::ZeroSize("qpp::apply()");
    if (!internal::check_square_mat(Ks[0]))
        throw exception::MatrixNotSquare("qpp::apply()");
    if (Ks[0].rows() != rA.rows())
        throw exception::DimsMismatchMatrix("qpp::apply()");
    for (auto&& elem : Ks)
        if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
            throw exception::DimsNotEqual("qpp::apply()");
    // END EXCEPTION CHECKS

    cmat result = cmat::Zero(rA.rows(), rA.rows());

#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
    for (idx i = 0; i < Ks.size(); ++i) {
        // PERF FIX: evaluate the expensive Kraus term K_i * rho * K_i^dagger
        // OUTSIDE the critical section; previously the whole loop body was
        // inside it, serializing the "parallel" loop. Only the cheap matrix
        // accumulation remains serialized.
        cmat term = Ks[i] * rA * adjoint(Ks[i]);
#ifdef WITH_OPENMP_
#pragma omp critical
#endif // WITH_OPENMP_
        { result += term; }
    }

    return result;
}
/**
 * \brief Applies the channel specified by the set of Kraus operators \a Ks to
 * the part \a target of the multi-partite density matrix \a A
 *
 * The result is \f$\sum_i \mathrm{apply}(A, K_i, target, dims)\f$, i.e. each
 * Kraus operator is applied to the targeted subsystems and the outputs summed.
 *
 * \param A Eigen expression
 * \param Ks Set of Kraus operators
 * \param target Subsystem indexes where the Kraus operators \a Ks are applied
 * \param dims Dimensions of the multi-partite system
 * \return Output density matrix after the action of the channel
 */
template <typename Derived>
cmat apply(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks,
           const std::vector<idx>& target, const std::vector<idx>& dims) {
    const cmat& rA = A.derived();

    // EXCEPTION CHECKS
    // check zero sizes
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::apply()");
    // check zero sizes
    if (!internal::check_nonzero_size(target))
        throw exception::ZeroSize("qpp::apply()");
    // check square matrix for the A
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::apply()");
    // check that dimension is valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::apply()");
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::apply()");
    // check valid state and matching dimensions
    // NOTE(review): rA was already required to be square above, so the
    // column-vector branch below can only trigger for a 1x1 matrix;
    // presumably kept for symmetry with the state-vector overload — confirm
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::apply()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::apply()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::apply()");

    // dimensions of the targeted subsystems, in target order; the Kraus
    // operators must act on a space of exactly this total dimension
    std::vector<idx> subsys_dims(target.size());
    for (idx i = 0; i < target.size(); ++i)
        subsys_dims[i] = dims[target[i]];

    // check the Kraus operators
    if (Ks.empty())
        throw exception::ZeroSize("qpp::apply()");
    if (!internal::check_square_mat(Ks[0]))
        throw exception::MatrixNotSquare("qpp::apply()");
    if (!internal::check_dims_match_mat(subsys_dims, Ks[0]))
        throw exception::MatrixMismatchSubsys("qpp::apply()");
    for (auto&& elem : Ks)
        if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
            throw exception::DimsNotEqual("qpp::apply()");
    // END EXCEPTION CHECKS

    // sum the action of each Kraus operator on the target subsystems
    cmat result = cmat::Zero(rA.rows(), rA.rows());
    for (idx i = 0; i < Ks.size(); ++i)
        result += apply(rA, Ks[i], target, dims);

    return result;
}
/**
 * \brief Applies the channel specified by the set of Kraus operators \a Ks to
 * the part \a target of the multi-partite density matrix \a A
 *
 * Convenience overload for subsystems of equal dimension \a d.
 *
 * \param A Eigen expression
 * \param Ks Set of Kraus operators
 * \param target Subsystem indexes where the Kraus operators \a Ks are applied
 * \param d Subsystem dimensions
 * \return Output density matrix after the action of the channel
 */
template <typename Derived>
cmat apply(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks,
           const std::vector<idx>& target, idx d = 2) {
    const cmat& rA = A.derived();

    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::apply()");
    // every subsystem must have dimension at least 2
    if (d < 2)
        throw exception::DimsInvalid("qpp::apply()");
    // END EXCEPTION CHECKS

    // build the uniform local-dimension vector and forward to the
    // general-dims overload
    idx num_subsys = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> uniform_dims(num_subsys, d);

    return apply(rA, Ks, target, uniform_dims);
}
/**
 * \brief Superoperator matrix
 *
 * Constructs the superoperator matrix of the channel specified by the set of
 * Kraus operators \a Ks in the standard operator basis
 * \f$\{|i\rangle\langle j|\}\f$ ordered in lexicographical order, i.e.
 * \f$|0\rangle\langle 0|\f$, \f$|0\rangle\langle 1|\f$ etc.
 *
 * \param Ks Set of Kraus operators
 * \return Superoperator matrix
 */
inline cmat kraus2super(const std::vector<cmat>& Ks) {
    // EXCEPTION CHECKS
    if (Ks.empty())
        throw exception::ZeroSize("qpp::kraus2super()");
    if (!internal::check_nonzero_size(Ks[0]))
        throw exception::ZeroSize("qpp::kraus2super()");
    if (!internal::check_square_mat(Ks[0]))
        throw exception::MatrixNotSquare("qpp::kraus2super()");
    for (auto&& elem : Ks)
        if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
            throw exception::DimsNotEqual("qpp::kraus2super()");
    // END EXCEPTION CHECKS

    idx D = static_cast<idx>(Ks[0].rows());
    cmat result(D * D, D * D);
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
    for (idx m = 0; m < D; ++m) {
        for (idx n = 0; n < D; ++n) {
            // all scratch matrices are local to the iteration, so no
            // critical section is needed and the (m, n) iterations run
            // truly in parallel; previously MN/A/B/EMN were shared and the
            // whole body was serialized under "omp critical"
            cmat MN = cmat::Zero(D, D);
            MN(m, n) = 1; // the basis operator |m><n|
            // compute E(|m><n|)
            cmat EMN = cmat::Zero(D, D);
            for (idx i = 0; i < Ks.size(); ++i)
                EMN += Ks[i] * MN * adjoint(Ks[i]);
            // result(ab, mn) = <a|E(|m><n|)|b>, which is simply the (a, b)
            // entry of EMN — no bra/ket products needed
            for (idx a = 0; a < D; ++a)
                for (idx b = 0; b < D; ++b)
                    result(a * D + b, m * D + n) = EMN(a, b);
        }
    }

    return result;
}
/**
 * \brief Choi matrix
 * \see qpp::choi2kraus()
 *
 * Constructs the Choi matrix of the channel specified by the set of Kraus
 * operators \a Ks in the standard operator basis \f$\{|i\rangle\langle j|\}\f$
 * ordered in lexicographical order, i.e.
 * \f$|0\rangle\langle 0|\f$, \f$|0\rangle\langle 1|\f$ etc.
 *
 * \note The superoperator matrix \f$S\f$ and the Choi matrix \f$ C\f$ are
 * related by \f$ S_{ab,mn} = C_{ma,nb}\f$
 *
 * \param Ks Set of Kraus operators
 * \return Choi matrix
 */
inline cmat kraus2choi(const std::vector<cmat>& Ks) {
    // EXCEPTION CHECKS
    if (Ks.empty())
        throw exception::ZeroSize("qpp::kraus2choi()");
    if (!internal::check_nonzero_size(Ks[0]))
        throw exception::ZeroSize("qpp::kraus2choi()");
    if (!internal::check_square_mat(Ks[0]))
        throw exception::MatrixNotSquare("qpp::kraus2choi()");
    for (auto&& elem : Ks)
        if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
            throw exception::DimsNotEqual("qpp::kraus2choi()");
    // END EXCEPTION CHECKS

    idx D = static_cast<idx>(Ks[0].rows());

    // construct the D x D \sum |jj> vector
    // (un-normalized maximally entangled state)
    cmat MES = cmat::Zero(D * D, 1);
    for (idx a = 0; a < D; ++a)
        MES(a * D + a) = 1;
    cmat Omega = MES * adjoint(MES);

    cmat result = cmat::Zero(D * D, D * D);
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
    for (idx i = 0; i < Ks.size(); ++i) {
        // compute the expensive i-th term OUTSIDE the critical section so the
        // Kronecker/matrix products actually run in parallel; previously the
        // whole loop body was inside "omp critical", serializing all the work
        cmat term = kron(cmat::Identity(D, D), Ks[i]) * Omega *
                    adjoint(kron(cmat::Identity(D, D), Ks[i]));
#ifdef WITH_OPENMP_
#pragma omp critical
#endif // WITH_OPENMP_
        { result += term; }
    }

    return result;
}
/**
 * \brief Orthogonal Kraus operators from Choi matrix
 * \see qpp::kraus2choi()
 *
 * Extracts a set of orthogonal (under Hilbert-Schmidt operator norm) Kraus
 * operators from the Choi matrix \a A
 *
 * \note The Kraus operators satisfy \f$Tr(K_i^\dagger K_j)=\delta_{ij}\f$ for
 * all \f$i\neq j\f$
 *
 * \param A Choi matrix
 * \return Set of orthogonal Kraus operators
 */
inline std::vector<cmat> choi2kraus(const cmat& A) {
    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(A))
        throw exception::ZeroSize("qpp::choi2kraus()");
    if (!internal::check_square_mat(A))
        throw exception::MatrixNotSquare("qpp::choi2kraus()");
    // explicit cast from the signed Eigen::Index, consistent with
    // qpp::choi2super()/qpp::super2choi()
    idx D = internal::get_dim_subsys(static_cast<idx>(A.rows()), 2);
    // check equal dimensions
    if (D * D != static_cast<idx>(A.rows()))
        throw exception::DimsInvalid("qpp::choi2kraus()");
    // END EXCEPTION CHECKS

    dmat ev = hevals(A);
    cmat evec = hevects(A);
    std::vector<cmat> result;

    // each strictly non-zero eigenvalue/eigenvector pair yields one Kraus
    // operator sqrt(|lambda_i|) * reshape(v_i, D, D)
    // NOTE(review): the comparison uses exact zero, not a numerical
    // tolerance, so tiny spurious eigenvalues produce (negligible) Kraus
    // operators — presumably intentional; confirm
    for (idx i = 0; i < D * D; ++i) {
        if (std::abs(ev(i)) > 0)
            result.emplace_back(std::sqrt(std::abs(ev(i))) *
                                reshape(evec.col(i), D, D));
    }

    return result;
}
/**
 * \brief Converts Choi matrix to superoperator matrix
 * \see qpp::super2choi()
 *
 * Uses the index relation \f$ S_{ab,mn} = C_{ma,nb}\f$.
 *
 * \param A Choi matrix
 * \return Superoperator matrix
 */
inline cmat choi2super(const cmat& A) {
    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(A))
        throw exception::ZeroSize("qpp::choi2super()");
    if (!internal::check_square_mat(A))
        throw exception::MatrixNotSquare("qpp::choi2super()");
    idx D = internal::get_dim_subsys(static_cast<idx>(A.rows()), 2);
    // check equal dimensions
    if (D * D != static_cast<idx>(A.rows()))
        throw exception::DimsInvalid("qpp::choi2super()");
    // END EXCEPTION CHECKS

    cmat result(D * D, D * D);
// pure index shuffle; collapse(4) requires the loops to stay perfectly nested
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(4)
#endif // WITH_OPENMP_
    for (idx a = 0; a < D; ++a)
        for (idx b = 0; b < D; ++b)
            for (idx m = 0; m < D; ++m)
                for (idx n = 0; n < D; ++n)
                    // S(ab, mn) = C(ma, nb)
                    result(a * D + b, m * D + n) = A(m * D + a, n * D + b);

    return result;
}
/**
 * \brief Converts superoperator matrix to Choi matrix
 * \see qpp::choi2super()
 *
 * Inverse of qpp::choi2super(): uses \f$ C_{ma,nb} = S_{ab,mn}\f$.
 *
 * \param A Superoperator matrix
 * \return Choi matrix
 */
inline cmat super2choi(const cmat& A) {
    // EXCEPTION CHECKS
    if (!internal::check_nonzero_size(A))
        throw exception::ZeroSize("qpp::super2choi()");
    if (!internal::check_square_mat(A))
        throw exception::MatrixNotSquare("qpp::super2choi()");
    idx D = internal::get_dim_subsys(static_cast<idx>(A.rows()), 2);
    // check equal dimensions
    if (D * D != static_cast<idx>(A.rows()))
        throw exception::DimsInvalid("qpp::super2choi()");
    // END EXCEPTION CHECKS

    cmat result(D * D, D * D);
// pure index shuffle; collapse(4) requires the loops to stay perfectly nested
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(4)
#endif // WITH_OPENMP_
    for (idx a = 0; a < D; ++a)
        for (idx b = 0; b < D; ++b)
            for (idx m = 0; m < D; ++m)
                for (idx n = 0; n < D; ++n)
                    // C(ma, nb) = S(ab, mn)
                    result(m * D + a, n * D + b) = A(a * D + b, m * D + n);

    return result;
}
/**
 * \brief Partial trace
 * \see qpp::ptrace2()
 *
 * Partial trace over the first subsystem of bi-partite state vector or density
 * matrix
 *
 * \param A Eigen expression
 * \param dims Dimensions of the bi-partite system
 * \return Partial trace \f$Tr_{A}(\cdot)\f$ over the first subsytem \f$A\f$
 * in a bi-partite system \f$A\otimes B\f$, as a dynamic matrix over the same
 * scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> ptrace1(const Eigen::MatrixBase<Derived>& A,
                                          const std::vector<idx>& dims) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::ptrace1()");
    // check that dims is a valid dimension vector
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::ptrace1()");
    // check dims has only 2 elements
    if (dims.size() != 2)
        throw exception::NotBipartite("qpp::ptrace1()");
    // END EXCEPTION CHECKS

    idx DA = dims[0]; // dimension of the traced-out first subsystem
    idx DB = dims[1]; // dimension of the surviving second subsystem

    // the result lives on subsystem B only, hence DB x DB
    dyn_mat<typename Derived::Scalar> result =
        dyn_mat<typename Derived::Scalar>::Zero(DB, DB);

    //************ ket ************//
    if (internal::check_cvector(rA)) // we have a ket
    {
        // check that dims match the dimension of A
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::ptrace1()");

        // result(i, j) = sum_m rA(m*DB + i) * conj(rA(m*DB + j)),
        // i.e. Tr_A(|psi><psi|) without forming the full density matrix
        auto worker = [&](idx i, idx j) noexcept->typename Derived::Scalar {
            typename Derived::Scalar sum = 0;
            for (idx m = 0; m < DA; ++m)
                sum += rA(m * DB + i) * std::conj(rA(m * DB + j));

            return sum;
        }; /* end worker */

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed
        for (idx j = 0; j < DB; ++j)
            for (idx i = 0; i < DB; ++i)
                result(i, j) = worker(i, j);
    }
    //************ density matrix ************//
    else if (internal::check_square_mat(rA)) // we have a density operator
    {
        // check that dims match the dimension of A
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::ptrace1()");

        // result(i, j) = sum_m rA(m*DB + i, m*DB + j): sum over the diagonal
        // blocks of subsystem A
        auto worker = [&](idx i, idx j) noexcept->typename Derived::Scalar {
            typename Derived::Scalar sum = 0;
            for (idx m = 0; m < DA; ++m)
                sum += rA(m * DB + i, m * DB + j);

            return sum;
        }; /* end worker */

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed
        for (idx j = 0; j < DB; ++j)
            for (idx i = 0; i < DB; ++i)
                result(i, j) = worker(i, j);
    }
    //************ Exception: not ket nor density matrix ************//
    else
        throw exception::MatrixNotSquareNorCvector("qpp::ptrace1()");

    return result;
}
/**
 * \brief Partial trace
 * \see qpp::ptrace2()
 *
 * Partial trace over the first subsystem of bi-partite state vector or density
 * matrix; convenience overload for subsystems of equal dimension \a d.
 *
 * \param A Eigen expression
 * \param d Subsystem dimensions
 * \return Partial trace \f$Tr_{A}(\cdot)\f$ over the first subsytem \f$A\f$ in
 * a bi-partite system \f$A\otimes B\f$, as a dynamic matrix over the same
 * scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> ptrace1(const Eigen::MatrixBase<Derived>& A,
                                          idx d = 2) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::ptrace1()");
    // the subsystem dimension cannot be zero
    if (d == 0)
        throw exception::DimsInvalid("qpp::ptrace1()");
    // END EXCEPTION CHECKS

    // forward to the general-dims overload with two equal dimensions
    std::vector<idx> bipartite_dims(2, d);

    return ptrace1(rA, bipartite_dims);
}
/**
 * \brief Partial trace
 * \see qpp::ptrace1()
 *
 * Partial trace over the second subsystem of bi-partite state vector or density
 * matrix
 *
 * \param A Eigen expression
 * \param dims Dimensions of the bi-partite system
 * \return Partial trace \f$Tr_{B}(\cdot)\f$ over the second subsytem \f$B\f$ in
 * a bi-partite system \f$A\otimes B\f$, as a dynamic matrix over the same
 * scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> ptrace2(const Eigen::MatrixBase<Derived>& A,
                                          const std::vector<idx>& dims) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::ptrace2()");
    // check that dims is a valid dimension vector
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::ptrace2()");
    // check dims has only 2 elements
    if (dims.size() != 2)
        throw exception::NotBipartite("qpp::ptrace2()");
    // END EXCEPTION CHECKS

    idx DA = dims[0]; // dimension of the surviving first subsystem
    idx DB = dims[1]; // dimension of the traced-out second subsystem

    // the result lives on subsystem A only, hence DA x DA
    dyn_mat<typename Derived::Scalar> result =
        dyn_mat<typename Derived::Scalar>::Zero(DA, DA);

    //************ ket ************//
    if (internal::check_cvector(rA)) // we have a ket
    {
        // check that dims match the dimension of A
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::ptrace2()");

        // result(i, j) = sum_m rA(i*DB + m) * conj(rA(j*DB + m)),
        // i.e. Tr_B(|psi><psi|) without forming the full density matrix
        auto worker = [&](idx i, idx j) noexcept->typename Derived::Scalar {
            typename Derived::Scalar sum = 0;
            for (idx m = 0; m < DB; ++m)
                sum += rA(i * DB + m) * std::conj(rA(j * DB + m));

            return sum;
        }; /* end worker */

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed
        for (idx j = 0; j < DA; ++j)
            for (idx i = 0; i < DA; ++i)
                result(i, j) = worker(i, j);
    }
    //************ density matrix ************//
    else if (internal::check_square_mat(rA)) // we have a density operator
    {
        // check that dims match the dimension of A
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::ptrace2()");

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed; each entry is the trace of a
        // DB x DB block
        for (idx j = 0; j < DA; ++j)
            for (idx i = 0; i < DA; ++i)
                result(i, j) = trace(rA.block(i * DB, j * DB, DB, DB));
    }
    //************ Exception: not ket nor density matrix ************//
    else
        // bug fix: the exception previously reported "qpp::ptrace1()"
        throw exception::MatrixNotSquareNorCvector("qpp::ptrace2()");

    return result;
}
/**
 * \brief Partial trace
 * \see qpp::ptrace1()
 *
 * Partial trace over the second subsystem of bi-partite state vector or
 * density matrix; convenience overload for subsystems of equal dimension \a d.
 *
 * \param A Eigen expression
 * \param d Subsystem dimensions
 * \return Partial trace \f$Tr_{B}(\cdot)\f$ over the second subsytem \f$B\f$ in
 * a bi-partite system \f$A\otimes B\f$, as a dynamic matrix over the same
 * scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> ptrace2(const Eigen::MatrixBase<Derived>& A,
                                          idx d = 2) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::ptrace2()");
    // the subsystem dimension cannot be zero
    if (d == 0)
        throw exception::DimsInvalid("qpp::ptrace2()");
    // END EXCEPTION CHECKS

    // forward to the general-dims overload with two equal dimensions
    std::vector<idx> bipartite_dims(2, d);

    return ptrace2(rA, bipartite_dims);
}
/**
 * \brief Partial trace
 * \see qpp::ptrace1(), qpp::ptrace2()
 *
 * Partial trace of the multi-partite state vector or density matrix over the
 * list \a target of subsystems
 *
 * \param A Eigen expression
 * \param target Subsystem indexes
 * \param dims Dimensions of the multi-partite system
 * \return Partial trace \f$Tr_{subsys}(\cdot)\f$ over the subsytems \a target
 * in a multi-partite system, as a dynamic matrix over the same scalar field as
 * \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> ptrace(const Eigen::MatrixBase<Derived>& A,
                                         const std::vector<idx>& target,
                                         const std::vector<idx>& dims) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::ptrace()");
    // check that dims is a valid dimension vector
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::ptrace()");
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::ptrace()");
    // check valid state and matching dimensions
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::ptrace()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::ptrace()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::ptrace()");
    // END EXCEPTION CHECKS

    idx D = static_cast<idx>(rA.rows()); // total Hilbert-space dimension
    idx n = dims.size();                 // total number of subsystems
    idx n_subsys = target.size();        // number of traced-out subsystems
    idx n_subsys_bar = n - n_subsys;     // number of surviving subsystems

    // total dimension of the traced-out subsystems
    idx Dsubsys = 1;
    for (idx i = 0; i < n_subsys; ++i)
        Dsubsys *= dims[target[i]];
    // total dimension of the surviving subsystems (= result size)
    idx Dsubsys_bar = D / Dsubsys;

    // C-style arrays for speed inside the hot loops
    idx Cdims[maxn];             // all local dimensions
    idx Csubsys[maxn];           // traced-out subsystem indexes
    idx Cdimssubsys[maxn];       // dimensions of the traced-out subsystems
    idx Csubsys_bar[maxn];       // surviving subsystem indexes
    idx Cdimssubsys_bar[maxn];   // dimensions of the surviving subsystems
    // shared column multi-index buffer: written once per column j (below,
    // before the parallel row loop) and read by the worker lambdas
    idx Cmidxcolsubsys_bar[maxn];

    std::vector<idx> subsys_bar = complement(target, n);
    std::copy(std::begin(subsys_bar), std::end(subsys_bar),
              std::begin(Csubsys_bar));

    for (idx i = 0; i < n; ++i) {
        Cdims[i] = dims[i];
    }
    for (idx i = 0; i < n_subsys; ++i) {
        Csubsys[i] = target[i];
        Cdimssubsys[i] = dims[target[i]];
    }
    for (idx i = 0; i < n_subsys_bar; ++i) {
        Cdimssubsys_bar[i] = dims[subsys_bar[i]];
    }

    // NOTE: deliberately NOT zero-initialized — every entry is written below
    // (or only (0, 0) is used in the full-trace early-return case)
    dyn_mat<typename Derived::Scalar> result =
        dyn_mat<typename Derived::Scalar>(Dsubsys_bar, Dsubsys_bar);

    //************ ket ************//
    if (internal::check_cvector(rA)) // we have a ket
    {
        // tracing out everything leaves the scalar <psi|psi>
        if (target.size() == dims.size()) {
            result(0, 0) = (adjoint(rA) * rA).value();
            return result;
        }

        // tracing out nothing leaves the full projector |psi><psi|
        if (target.empty())
            return rA * adjoint(rA);

        // computes result(i, j) for the column j fixed by the shared buffer
        // Cmidxcolsubsys_bar; sums |psi><psi| over the traced-out indexes
        auto worker = [&](idx i) noexcept->typename Derived::Scalar {
            // use static allocation for speed!
            idx Cmidxrow[maxn];
            idx Cmidxcol[maxn];
            idx Cmidxrowsubsys_bar[maxn];
            idx Cmidxsubsys[maxn];

            /* get the row multi-indexes of the complement */
            internal::n2multiidx(i, n_subsys_bar, Cdimssubsys_bar,
                                 Cmidxrowsubsys_bar);
            /* write them in the global row/col multi-indexes */
            for (idx k = 0; k < n_subsys_bar; ++k) {
                Cmidxrow[Csubsys_bar[k]] = Cmidxrowsubsys_bar[k];
                Cmidxcol[Csubsys_bar[k]] = Cmidxcolsubsys_bar[k];
            }

            typename Derived::Scalar sm = 0;
            for (idx a = 0; a < Dsubsys; ++a) {
                // get the multi-index over which we do the summation
                internal::n2multiidx(a, n_subsys, Cdimssubsys, Cmidxsubsys);
                // write it into the global row/col multi-indexes
                for (idx k = 0; k < n_subsys; ++k)
                    Cmidxrow[Csubsys[k]] = Cmidxcol[Csubsys[k]] =
                        Cmidxsubsys[k];

                // now do the sum
                sm += rA(internal::multiidx2n(Cmidxrow, n, Cdims)) *
                      std::conj(rA(internal::multiidx2n(Cmidxcol, n, Cdims)));
            }

            return sm;
        }; /* end worker */

        for (idx j = 0; j < Dsubsys_bar; ++j) // column major order for speed
        {
            // compute the column multi-indexes of the complement
            // (fills the shared buffer read by worker — must stay OUTSIDE
            // the parallel region)
            internal::n2multiidx(j, n_subsys_bar, Cdimssubsys_bar,
                                 Cmidxcolsubsys_bar);
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
            for (idx i = 0; i < Dsubsys_bar; ++i) {
                result(i, j) = worker(i);
            }
        }
    }
    //************ density matrix ************//
    else // we have a density operator
    {
        // tracing out everything leaves the scalar Tr(rho)
        if (target.size() == dims.size()) {
            result(0, 0) = rA.trace();
            return result;
        }

        // tracing out nothing leaves rho unchanged
        if (target.empty())
            return rA;

        // computes result(i, j) for the column j fixed by the shared buffer
        // Cmidxcolsubsys_bar; sums rho over the traced-out indexes
        auto worker = [&](idx i) noexcept->typename Derived::Scalar {
            // use static allocation for speed!
            idx Cmidxrow[maxn];
            idx Cmidxcol[maxn];
            idx Cmidxrowsubsys_bar[maxn];
            idx Cmidxsubsys[maxn];

            /* get the row/col multi-indexes of the complement */
            internal::n2multiidx(i, n_subsys_bar, Cdimssubsys_bar,
                                 Cmidxrowsubsys_bar);
            /* write them in the global row/col multi-indexes */
            for (idx k = 0; k < n_subsys_bar; ++k) {
                Cmidxrow[Csubsys_bar[k]] = Cmidxrowsubsys_bar[k];
                Cmidxcol[Csubsys_bar[k]] = Cmidxcolsubsys_bar[k];
            }

            typename Derived::Scalar sm = 0;
            for (idx a = 0; a < Dsubsys; ++a) {
                // get the multi-index over which we do the summation
                internal::n2multiidx(a, n_subsys, Cdimssubsys, Cmidxsubsys);
                // write it into the global row/col multi-indexes
                for (idx k = 0; k < n_subsys; ++k)
                    Cmidxrow[Csubsys[k]] = Cmidxcol[Csubsys[k]] =
                        Cmidxsubsys[k];

                // now do the sum
                sm += rA(internal::multiidx2n(Cmidxrow, n, Cdims),
                         internal::multiidx2n(Cmidxcol, n, Cdims));
            }

            return sm;
        }; /* end worker */

        for (idx j = 0; j < Dsubsys_bar; ++j) // column major order for speed
        {
            // compute the column multi-indexes of the complement
            // (fills the shared buffer read by worker — must stay OUTSIDE
            // the parallel region)
            internal::n2multiidx(j, n_subsys_bar, Cdimssubsys_bar,
                                 Cmidxcolsubsys_bar);
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
            for (idx i = 0; i < Dsubsys_bar; ++i) {
                result(i, j) = worker(i);
            }
        }
    }

    return result;
}
/**
 * \brief Partial trace
 * \see qpp::ptrace1(), qpp::ptrace2()
 *
 * Partial trace of the multi-partite state vector or density matrix over the
 * list \a target of subsystems; convenience overload for subsystems of equal
 * dimension \a d.
 *
 * \param A Eigen expression
 * \param target Subsystem indexes
 * \param d Subsystem dimensions
 * \return Partial trace \f$Tr_{subsys}(\cdot)\f$ over the subsytems \a target
 * in a multi-partite system, as a dynamic matrix over the same scalar field as
 * \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> ptrace(const Eigen::MatrixBase<Derived>& A,
                                         const std::vector<idx>& target,
                                         idx d = 2) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::ptrace()");
    // every subsystem must have dimension at least 2
    if (d < 2)
        throw exception::DimsInvalid("qpp::ptrace()");
    // END EXCEPTION CHECKS

    // build the uniform local-dimension vector and forward to the
    // general-dims overload
    idx num_subsys = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> uniform_dims(num_subsys, d);

    return ptrace(rA, target, uniform_dims);
}
/**
 * \brief Partial transpose
 *
 * Partial transpose of the multi-partite state vector or density matrix over
 * the list \a target of subsystems
 *
 * \param A Eigen expression
 * \param target Subsystem indexes
 * \param dims Dimensions of the multi-partite system
 * \return Partial transpose \f$(\cdot)^{T_{subsys}}\f$
 * over the subsytems \a target in a multi-partite system, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
ptranspose(const Eigen::MatrixBase<Derived>& A, const std::vector<idx>& target,
           const std::vector<idx>& dims) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::ptranspose()");
    // check that dims is a valid dimension vector
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::ptranspose()");
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::ptranspose()");
    // check valid state and matching dimensions
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::ptranspose()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::ptranspose()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::ptranspose()");
    // END EXCEPTION CHECKS

    idx D = static_cast<idx>(rA.rows()); // total Hilbert-space dimension
    idx n = dims.size();                 // total number of subsystems
    idx n_subsys = target.size();        // number of transposed subsystems

    // C-style arrays for speed inside the hot loops
    idx Cdims[maxn];    // all local dimensions
    // shared column multi-index buffer: written once per column j (below,
    // before the parallel row loop) and copied by the worker lambdas
    idx Cmidxcol[maxn];
    idx Csubsys[maxn];  // transposed subsystem indexes

    // copy dims in Cdims and target in Csubsys
    for (idx i = 0; i < n; ++i)
        Cdims[i] = dims[i];
    for (idx i = 0; i < n_subsys; ++i)
        Csubsys[i] = target[i];

    dyn_mat<typename Derived::Scalar> result(D, D);

    //************ ket ************//
    if (internal::check_cvector(rA)) // we have a ket
    {
        // transposing everything gives the full transpose of |psi><psi|
        if (target.size() == dims.size())
            return (rA * adjoint(rA)).transpose();

        // transposing nothing gives |psi><psi| unchanged
        if (target.empty())
            return rA * adjoint(rA);

        // computes result(i, j) for the column j fixed by the shared buffer
        // Cmidxcol: swaps the targeted row/col indexes of |psi><psi|
        auto worker = [&](idx i) noexcept->typename Derived::Scalar {
            // use static allocation for speed!
            idx midxcoltmp[maxn];
            idx midxrow[maxn];

            // private copy of the shared column multi-index
            for (idx k = 0; k < n; ++k)
                midxcoltmp[k] = Cmidxcol[k];

            /* compute the row multi-index */
            internal::n2multiidx(i, n, Cdims, midxrow);

            // swap row/col indexes on the targeted subsystems
            for (idx k = 0; k < n_subsys; ++k)
                std::swap(midxcoltmp[Csubsys[k]], midxrow[Csubsys[k]]);

            /* writes the result */
            return rA(internal::multiidx2n(midxrow, n, Cdims)) *
                   std::conj(rA(internal::multiidx2n(midxcoltmp, n, Cdims)));
        }; /* end worker */

        for (idx j = 0; j < D; ++j) {
            // compute the column multi-index (fills the shared buffer read by
            // worker — must stay OUTSIDE the parallel region)
            internal::n2multiidx(j, n, Cdims, Cmidxcol);
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
            for (idx i = 0; i < D; ++i)
                result(i, j) = worker(i);
        }
    }
    //************ density matrix ************//
    else // we have a density operator
    {
        // transposing everything is the full transpose
        if (target.size() == dims.size())
            return rA.transpose();

        // transposing nothing is the identity operation
        if (target.empty())
            return rA;

        // computes result(i, j) for the column j fixed by the shared buffer
        // Cmidxcol: swaps the targeted row/col indexes of rho
        auto worker = [&](idx i) noexcept->typename Derived::Scalar {
            // use static allocation for speed!
            idx midxcoltmp[maxn];
            idx midxrow[maxn];

            // private copy of the shared column multi-index
            for (idx k = 0; k < n; ++k)
                midxcoltmp[k] = Cmidxcol[k];

            /* compute the row multi-index */
            internal::n2multiidx(i, n, Cdims, midxrow);

            // swap row/col indexes on the targeted subsystems
            for (idx k = 0; k < n_subsys; ++k)
                std::swap(midxcoltmp[Csubsys[k]], midxrow[Csubsys[k]]);

            /* writes the result */
            return rA(internal::multiidx2n(midxrow, n, Cdims),
                      internal::multiidx2n(midxcoltmp, n, Cdims));
        }; /* end worker */

        for (idx j = 0; j < D; ++j) {
            // compute the column multi-index (fills the shared buffer read by
            // worker — must stay OUTSIDE the parallel region)
            internal::n2multiidx(j, n, Cdims, Cmidxcol);
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
            for (idx i = 0; i < D; ++i)
                result(i, j) = worker(i);
        }
    }

    return result;
}
/**
 * \brief Partial transpose
 *
 * Partial transpose of the multi-partite state vector or density matrix over
 * the list \a target of subsystems; convenience overload for subsystems of
 * equal dimension \a d.
 *
 * \param A Eigen expression
 * \param target Subsystem indexes
 * \param d Subsystem dimensions
 * \return Partial transpose \f$(\cdot)^{T_{subsys}}\f$ over the subsytems
 * \a target in a multi-partite system, as a dynamic matrix over the same scalar
 * field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
ptranspose(const Eigen::MatrixBase<Derived>& A, const std::vector<idx>& target,
           idx d = 2) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::ptranspose()");
    // every subsystem must have dimension at least 2
    if (d < 2)
        throw exception::DimsInvalid("qpp::ptranspose()");
    // END EXCEPTION CHECKS

    // build the uniform local-dimension vector and forward to the
    // general-dims overload
    idx num_subsys = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> uniform_dims(num_subsys, d);

    return ptranspose(rA, target, uniform_dims);
}
/**
 * \brief Subsystem permutation
 *
 * Permutes the subsystems of a state vector or density matrix. The qubit
 * \a perm[\a i] is permuted to the location \a i.
 *
 * \param A Eigen expression
 * \param perm Permutation
 * \param dims Dimensions of the multi-partite system
 * \return Permuted system, as a dynamic matrix over the same scalar field as
 * \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
syspermute(const Eigen::MatrixBase<Derived>& A, const std::vector<idx>& perm,
           const std::vector<idx>& dims) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::syspermute()");
    // check that dims is a valid dimension vector
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::syspermute()");
    // check that we have a valid permutation
    if (!internal::check_perm(perm))
        throw exception::PermInvalid("qpp::syspermute()");
    // check that permutation match dimensions
    if (perm.size() != dims.size())
        throw exception::PermMismatchDims("qpp::syspermute()");
    // check valid state and matching dimensions
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::syspermute()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::syspermute()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::syspermute()");
    // END EXCEPTION CHECKS

    idx D = static_cast<idx>(rA.rows()); // total Hilbert-space dimension
    idx n = dims.size();                 // total number of subsystems

    dyn_mat<typename Derived::Scalar> result;

    //************ ket ************//
    if (internal::check_cvector(rA)) // we have a column vector
    {
        idx Cdims[maxn];
        idx Cperm[maxn];

        // copy dims in Cdims and perm in Cperm
        for (idx i = 0; i < n; ++i) {
            Cdims[i] = dims[i];
            Cperm[i] = perm[i];
        }
        result.resize(D, 1);

        // maps the linear index i of the input ket to its destination index
        // in the permuted ket
        auto worker = [&Cdims, &Cperm, n ](idx i) noexcept->idx {
            // use static allocation for speed,
            // double the size for matrices reshaped as vectors
            idx midx[maxn];
            idx midxtmp[maxn];
            idx permdims[maxn];

            /* compute the multi-index */
            internal::n2multiidx(i, n, Cdims, midx);

            for (idx k = 0; k < n; ++k) {
                permdims[k] = Cdims[Cperm[k]]; // permuted dimensions
                midxtmp[k] = midx[Cperm[k]];   // permuted multi-indexes
            }

            return internal::multiidx2n(midxtmp, n, permdims);
        }; /* end worker */

// scatter write: worker is a bijection on [0, D), so each entry of result
// is written exactly once — safe under OpenMP
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
        for (idx i = 0; i < D; ++i)
            result(worker(i)) = rA(i);
    }
    //************ density matrix ************//
    else // we have a density operator
    {
        idx Cdims[2 * maxn];
        idx Cperm[2 * maxn];

        // copy dims in Cdims and perm in Cperm; the matrix is treated as a
        // vector over a doubled index space (row and column subsystems),
        // so dims and perm are duplicated, with the column half offset by n
        for (idx i = 0; i < n; ++i) {
            Cdims[i] = dims[i];
            Cdims[i + n] = dims[i];
            Cperm[i] = perm[i];
            Cperm[i + n] = perm[i] + n;
        }
        result.resize(D * D, 1);

        // map A to a column vector
        // NOTE(review): const_cast is needed because Eigen::Map over a
        // non-const matrix type requires a non-const pointer; the data is
        // only read, never written through this map
        dyn_mat<typename Derived::Scalar> vectA =
            Eigen::Map<dyn_mat<typename Derived::Scalar>>(
                const_cast<typename Derived::Scalar*>(rA.data()), D * D, 1);

        // maps the linear index i of the vectorized matrix to its
        // destination index in the permuted (vectorized) matrix
        auto worker = [&Cdims, &Cperm, n ](idx i) noexcept->idx {
            // use static allocation for speed,
            // double the size for matrices reshaped as vectors
            idx midx[2 * maxn];
            idx midxtmp[2 * maxn];
            idx permdims[2 * maxn];

            /* compute the multi-index */
            internal::n2multiidx(i, 2 * n, Cdims, midx);

            for (idx k = 0; k < 2 * n; ++k) {
                permdims[k] = Cdims[Cperm[k]]; // permuted dimensions
                midxtmp[k] = midx[Cperm[k]];   // permuted multi-indexes
            }

            return internal::multiidx2n(midxtmp, 2 * n, permdims);
        }; /* end worker */

// scatter write: worker is a bijection on [0, D*D), so each entry of result
// is written exactly once — safe under OpenMP
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
        for (idx i = 0; i < D * D; ++i)
            result(worker(i)) = rA(i);

        // fold the permuted vector back into a D x D matrix
        result = reshape(result, D, D);
    }

    return result;
}
/**
 * \brief Subsystem permutation
 *
 * Permutes the subsystems of a state vector or density matrix. The qubit
 * \a perm[\a i] is permuted to the location \a i. Convenience overload for
 * subsystems of equal dimension \a d.
 *
 * \param A Eigen expression
 * \param perm Permutation
 * \param d Subsystem dimensions
 * \return Permuted system, as a dynamic matrix over the same scalar field as
 * \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
syspermute(const Eigen::MatrixBase<Derived>& A, const std::vector<idx>& perm,
           idx d = 2) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::syspermute()");
    // every subsystem must have dimension at least 2
    if (d < 2)
        throw exception::DimsInvalid("qpp::syspermute()");
    // END EXCEPTION CHECKS

    // build the uniform local-dimension vector and forward to the
    // general-dims overload
    idx num_subsys = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> uniform_dims(num_subsys, d);

    return syspermute(rA, perm, uniform_dims);
}
// as in https://arxiv.org/abs/1707.08834
/**
* \brief Applies the qudit quantum Fourier transform to the part \a target of
* the multi-partite state vector or density matrix \a A
*
* \param A Eigen expression
* \param target Subsystem indexes where the QFT is applied
* \param d Subsystem dimensions
* \param swap Swaps the qubits/qudits at the end (true by default)
* \return Qudit Quantum Fourier transform applied to the part \a target of \a A
*/
// Applies the QFT circuit on the listed target subsystems: a Hadamard
// (qubits) or Fourier gate (qudits) on each target, followed by controlled
// phase rotations, with an optional final order-reversal swap stage.
template <typename Derived>
dyn_mat<typename Derived::Scalar> applyQFT(const Eigen::MatrixBase<Derived>& A,
                                           const std::vector<idx>& target,
                                           idx d = 2, bool swap = true) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero sizes
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::applyQFT()");
    // check valid subsystem dimension
    if (d < 2)
        throw exception::DimsInvalid("qpp::applyQFT()");
    // total number of qubits/qudits in the state
    idx n = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> dims(n, d); // local dimensions vector
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::applyQFT()");
    // check valid state and matching dimensions
    // (accepts either a column vector, i.e. a ket, or a square matrix,
    // i.e. a density matrix)
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::applyQFT()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::applyQFT()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::applyQFT()");
    // END EXCEPTION CHECKS
    dyn_mat<typename Derived::Scalar> result = rA;
    idx n_subsys = target.size();
    if (d == 2) // qubits
    {
        for (idx i = 0; i < n_subsys; ++i) {
            // apply Hadamard on qubit i
            result = apply(result, Gates::get_instance().H, {target[i]});
            // apply controlled rotations
            for (idx j = 2; j <= n_subsys - i; ++j) {
                // construct Rj = diag(1, exp(2*pi*i/2^j)), the standard
                // QFT phase gate, controlled by a later qubit
                cmat Rj(2, 2);
                Rj << 1, 0, 0, exp(2.0 * pi * 1_i / std::pow(2, j));
                result =
                    applyCTRL(result, Rj, {target[i + j - 1]}, {target[i]});
            }
        }
        if (swap) {
            // we have the qubits in reversed order, we must swap them
            for (idx i = 0; i < n_subsys / 2; ++i) {
                result = apply(result, Gates::get_instance().SWAP,
                               {target[i], target[n_subsys - i - 1]});
            }
        }
    } else { // qudits
        for (idx i = 0; i < n_subsys; ++i) {
            // apply qudit Fourier on qudit i
            result = apply(result, Gates::get_instance().Fd(d), {target[i]}, d);
            // apply controlled rotations
            for (idx j = 2; j <= n_subsys - i; ++j) {
                // construct Rj = diag(exp(2*pi*i*m/d^j)), m = 0..d-1
                cmat Rj = cmat::Zero(d, d);
                for (idx m = 0; m < d; ++m) {
                    Rj(m, m) = exp(2.0 * pi * m * 1_i / std::pow(d, j));
                }
                result =
                    applyCTRL(result, Rj, {target[i + j - 1]}, {target[i]}, d);
            }
        }
        if (swap) {
            // we have the qudits in reversed order, we must swap them
            for (idx i = 0; i < n_subsys / 2; ++i) {
                result = apply(result, Gates::get_instance().SWAPd(d),
                               {target[i], target[n_subsys - i - 1]}, d);
            }
        }
    }
    return result;
}
// as in https://arxiv.org/abs/1707.08834
/**
* \brief Applies the inverse (adjoint) qudit quantum Fourier transform to the
* part \a target of the multi-partite state vector or density matrix \a A
*
* \param A Eigen expression
* \param target Subsystem indexes where the TFQ is applied
* \param d Subsystem dimensions
* \param swap Swaps the qubits/qudits at the end (true by default)
* \return Inverse (adjoint) qudit Quantum Fourier transform applied to the part
* \a target of \a A
*/
// Inverse QFT circuit: undoes applyQFT() by running the same gate sequence
// in reverse order with conjugated (negative) phase rotations.
template <typename Derived>
dyn_mat<typename Derived::Scalar> applyTFQ(const Eigen::MatrixBase<Derived>& A,
                                           const std::vector<idx>& target,
                                           idx d = 2, bool swap = true) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero sizes
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::applyTFQ()");
    // check valid subsystem dimension
    if (d < 2)
        throw exception::DimsInvalid("qpp::applyTFQ()");
    // total number of qubits/qudits in the state
    idx n = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> dims(n, d); // local dimensions vector
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::applyTFQ()");
    // check valid state and matching dimensions
    // (accepts either a column vector, i.e. a ket, or a square matrix,
    // i.e. a density matrix)
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::applyTFQ()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::applyTFQ()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::applyTFQ()");
    // END EXCEPTION CHECKS
    dyn_mat<typename Derived::Scalar> result = rA;
    idx n_subsys = target.size();
    if (d == 2) // qubits
    {
        if (swap) {
            // we have the qubits in reversed order, we must swap them
            // (undo the final swap stage of applyQFT first)
            for (idx i = n_subsys / 2; i-- > 0;) {
                result = apply(result, Gates::get_instance().SWAP,
                               {target[i], target[n_subsys - i - 1]});
            }
        }
        // walk the qubits in reverse of applyQFT's order
        for (idx i = n_subsys; i-- > 0;) {
            // apply controlled rotations (inverse phases, reverse order)
            for (idx j = n_subsys - i + 1; j-- > 2;) {
                // construct Rj; the negative phase makes this the adjoint
                // of the rotation used in applyQFT
                cmat Rj(2, 2);
                Rj << 1, 0, 0, exp(-2.0 * pi * 1_i / std::pow(2, j));
                result =
                    applyCTRL(result, Rj, {target[i + j - 1]}, {target[i]});
            }
            // apply Hadamard on qubit i (H is its own inverse)
            result = apply(result, Gates::get_instance().H, {target[i]});
        }
    } else { // qudits
        if (swap) {
            // we have the qudits in reversed order, we must swap them
            for (idx i = n_subsys / 2; i-- > 0;) {
                result = apply(result, Gates::get_instance().SWAPd(d),
                               {target[i], target[n_subsys - i - 1]}, d);
            }
        }
        // walk the qudits in reverse of applyQFT's order
        for (idx i = n_subsys; i-- > 0;) {
            // apply controlled rotations (inverse phases, reverse order)
            for (idx j = n_subsys - i + 1; j-- > 2;) {
                // construct Rj = diag(exp(-2*pi*i*m/d^j)), m = 0..d-1
                cmat Rj = cmat::Zero(d, d);
                for (idx m = 0; m < d; ++m) {
                    Rj(m, m) = exp(-2.0 * pi * m * 1_i / std::pow(d, j));
                }
                result =
                    applyCTRL(result, Rj, {target[i + j - 1]}, {target[i]}, d);
            }
            // apply the adjoint qudit Fourier on qudit i
            result = apply(result, adjoint(Gates::get_instance().Fd(d)),
                           {target[i]}, d);
        }
    }
    return result;
}
// as in https://arxiv.org/abs/1707.08834
/**
* \brief Qudit quantum Fourier transform
*
* \param A Eigen expression
* \param d Subsystem dimensions
* \param swap Swaps the qubits/qudits at the end (true by default)
* \return Qudit quantum Fourier transform applied on \a A
*/
template <typename Derived>
dyn_col_vect<typename Derived::Scalar> QFT(const Eigen::MatrixBase<Derived>& A,
                                           idx d = 2, bool swap = true) {
    // Whole-state quantum Fourier transform: applyQFT() over every subsystem.
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    // reject empty expressions
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::QFT()");
    // subsystem dimension must be at least 2
    if (d < 2)
        throw exception::DimsInvalid("qpp::QFT()");

    // how many d-dimensional subsystems the state encodes
    idx total = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> local_dims(total, d); // every subsystem has dimension d

    // the input must be a ket or a square (density) matrix of matching size
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(local_dims, rA))
            throw exception::DimsMismatchCvector("qpp::QFT()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(local_dims, rA))
            throw exception::DimsMismatchMatrix("qpp::QFT()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::QFT()");
    // END EXCEPTION CHECKS

    // target every subsystem, in order 0, 1, ..., total - 1
    std::vector<idx> all_subsys(total);
    std::iota(std::begin(all_subsys), std::end(all_subsys), 0);

    ket result = applyQFT(rA, all_subsys, d, swap);

    return result;
}
// as in https://arxiv.org/abs/1707.08834
/**
* \brief Inverse (adjoint) qudit quantum Fourier transform
*
* \param A Eigen expression
* \param d Subsystem dimensions
* \param swap Swaps the qubits/qudits at the end (true by default)
* \return Inverse (adjoint) qudit quantum Fourier transform applied on \a A
*/
// Inverse (adjoint) qudit quantum Fourier transform on the whole state.
// Fix: the dimension-mismatch exception contexts previously reported
// "qpp::QFT()" (copy-paste from QFT()); they now correctly say "qpp::TFQ()".
template <typename Derived>
dyn_col_vect<typename Derived::Scalar> TFQ(const Eigen::MatrixBase<Derived>& A,
                                           idx d = 2, bool swap = true) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::TFQ()");
    // check valid subsystem dimension
    if (d < 2)
        throw exception::DimsInvalid("qpp::TFQ()");
    // total number of qubits/qudits in the state
    idx n = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> dims(n, d); // local dimensions vector
    // check valid state and matching dimensions
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::TFQ()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::TFQ()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::TFQ()");
    // END EXCEPTION CHECKS
    // apply the inverse QFT on all n subsystems
    std::vector<idx> subsys(n);
    std::iota(std::begin(subsys), std::end(subsys), 0);
    ket result = applyTFQ(rA, subsys, d, swap);
    return result;
}
} /* namespace qpp */
#endif /* OPERATIONS_H_ */
|
GB_unaryop__lnot_fp64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_uint16
// op(A') function: GB_tran__lnot_fp64_uint16
// C type: double
// A type: uint16_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (double) !(Ax [p] != 0) for all p; flat parallel pass.
// (Auto-generated file: the typecast and operator come from the GB_* macros
// defined above.)
GrB_Info GB_unop__lnot_fp64_uint16
(
    double *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each entry p is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = !(Ax [p] != 0), typecast uint16_t -> double
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose uint16_t A, typecast to double, apply !.
// (Auto-generated; the real work happens in the included transpose template,
// which expands the GB_* macros defined above.)
GrB_Info GB_tran__lnot_fp64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of 2 of the generic transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__minv_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_fp64
// op(A') function: GB_tran__minv_fp64_fp64
// C type: double
// A type: double
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1./Ax [p] for all p; flat parallel pass.
// (Auto-generated; typecast and operator come from the GB_* macros above.
// Division by zero follows IEEE-754 double semantics: 1./0. == inf.)
GrB_Info GB_unop__minv_fp64_fp64
(
    double *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each entry p is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = 1./Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): transpose double A and apply the reciprocal.
// (Auto-generated; the work happens in the included transpose template.)
GrB_Info GB_tran__minv_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of 2 of the generic transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
}
#endif
|
BIDMat_CBLAS.c | #include <jni.h>
#include <omp.h>
#ifdef __INTEL_COMPILER
#include <mkl.h>
#include <mkl_trans.h>
#else
#include <cblas.h>
#endif
/* Dot product of two double arrays: returns sum X[i*incX] * Y[i*incY]. */
JNIEXPORT jdouble JNICALL Java_edu_berkeley_bid_CBLAS_ddot
(JNIEnv * env, jobject calling_obj, jint N, jdoubleArray jX, jint incX, jdoubleArray jY, jint incY){
    /* pin both arrays, run the BLAS kernel, then unpin in reverse order */
    jdouble * x = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jdouble * y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    jdouble result = cblas_ddot(N, x, incX, y, incY);
    (*env)->ReleasePrimitiveArrayCritical(env, jY, y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, x, 0);
    return result;
}
/* Like ddot, but with element start offsets into X and Y and unit stride. */
JNIEXPORT jdouble JNICALL Java_edu_berkeley_bid_CBLAS_ddotxx
(JNIEnv * env, jobject calling_obj, jint N, jdoubleArray jX, jint startX, jdoubleArray jY, jint startY){
    jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    jdouble returnValue;
    returnValue = cblas_ddot(N, X+startX, 1, Y+startY, 1);
    /* release in reverse order of acquisition */
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
    return returnValue;
}
/* Column-wise dot products: Z[i] = dot(X(:,i), Y(:,i)) for ncols columns
 * stored column-major with leading dimensions ldx and ldy. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_ddotm
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jdoubleArray jX, jint ldx, jdoubleArray jY, jint ldy, jdoubleArray jZ){
    jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    jdouble * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
    int i;
    for (i = 0; i < ncols; i++) {
        Z[i] = cblas_ddot(nrows, X+i*ldx, 1, Y+i*ldy, 1);
    }
    (*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}
/* Row-wise accumulated products: Z[j] += sum over i of X[j,i] * Y[j,i].
 * The outer column loop is serial; the inner row loop is parallelized,
 * which is race-free because distinct j write distinct Z entries. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_ddotr
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jdoubleArray jX, jint ldx, jdoubleArray jY, jint ldy, jdoubleArray jZ){
    jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    jdouble * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
    int i, j;
    for (i = 0; i < ncols; i++) {
        #pragma omp parallel for
        for (j = 0; j < nrows; j++) {
            Z[j] += X[j + i*ldx] * Y[j + i*ldy];
        }
    }
    (*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}
/* Y := a*X + Y on pinned JVM arrays, with arbitrary strides. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_daxpy
(JNIEnv * env, jobject calling_obj, jint N, jdouble a, jdoubleArray jX, jint incX, jdoubleArray jY, jint incY){
    jdouble * x = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jdouble * y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    cblas_daxpy(N, a, x, incX, y, incY);
    /* unpin in reverse order; mode 0 copies back and releases */
    (*env)->ReleasePrimitiveArrayCritical(env, jY, y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, x, 0);
}
/* Like daxpy, but with element start offsets and unit stride. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_daxpyxx
(JNIEnv * env, jobject calling_obj, jint N, jdouble a, jdoubleArray jX, jint startX, jdoubleArray jY, jint startY){
    jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    cblas_daxpy(N, a, X+startX, 1, Y+startY, 1);
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}
/* Matrix-vector multiply Y := alpha*op(A)*X + beta*Y; order and transA are
 * integer-encoded CBLAS enums passed through from the Java side. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_dgemv
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint M, jint N, jdouble alpha,
jdoubleArray jA, jint lda, jdoubleArray jX, jint incX, jdouble beta, jdoubleArray jY, jint incY){
    jdouble * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
    jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    cblas_dgemv((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, M, N, alpha, A, lda, X, incX, beta, Y, incY);
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* Matrix-matrix multiply C := alpha*op(A)*op(B) + beta*C (double). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_dgemm
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint transB, jint M, jint N, jint K,
jdouble alpha, jdoubleArray jA, jint lda, jdoubleArray jB, jint ldb, jdouble beta, jdoubleArray jC, jint ldc){
    jdouble * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
    jdouble * B = (*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE);
    jdouble * C = (*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE);
    cblas_dgemm((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K,
                alpha, A, lda, B, ldb, beta, C, ldc);
    (*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* Out-of-place scaled copy/transpose B = alpha * op(A); order and transA are
 * one-character strings ("r"/"c", "t"/"n"). Dispatches to MKL when compiled
 * with icc, otherwise to the CBLAS extension cblas_domatcopy. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_domatcopy
(JNIEnv * env, jobject calling_obj, jstring j_order, jstring j_transA, jint M, jint N,
jdouble alpha, jdoubleArray j_A, jint lda, jdoubleArray j_B, jint ldb) {
    char * order = (char *)(*env)->GetStringUTFChars(env, j_order, 0);
    char * transA = (char *)(*env)->GetStringUTFChars(env, j_transA, 0);
    jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
    jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
#ifdef __INTEL_COMPILER
    mkl_domatcopy(order[0], transA[0], M, N, alpha, A, lda, B, ldb);
#else
    /* translate the single-character flags to CBLAS enums */
    int corder = (order[0] == 'r' || order[0] == 'R') ? CblasRowMajor : CblasColMajor;
    int ctrans = (transA[0] == 't' || transA[0] == 'T') ? CblasTrans : CblasNoTrans;
    cblas_domatcopy(corder, ctrans, M, N, alpha, A, lda, B, ldb);
#endif
    (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
    (*env)->ReleaseStringUTFChars(env, j_transA, transA);
    (*env)->ReleaseStringUTFChars(env, j_order, order);
}
/* Dense times sparse (compressed-column) accumulate: for each sparse column
 * i with nonzeros B[j] at rows ir[j], add B[j] * A(:,ir[j]) into C(:,i).
 * ioff = jc[0] normalizes 0- or 1-based index arrays.
 * The omp parallel for is race-free here: iteration i writes only C(:,i). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_dmcscm
(JNIEnv * env, jobject calling_obj, jint M, jint N, jdoubleArray j_A, jint lda,
jdoubleArray j_B, jintArray j_ir, jintArray j_jc, jdoubleArray j_C, jint ldc){
    jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
    jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
    jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE);
    jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE);
    jdouble * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE);
    int ioff = jc[0];
    int i;
    #pragma omp parallel for
    for (i = 0; i < N; i++) {
        int j, ir0;
        for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) {
            ir0 = ir[j]-ioff;
            cblas_daxpy(M, B[j], A+(ir0*lda), 1, C+(i*ldc), 1);
        }
    }
    (*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}
/* Dense times sparse accumulate, row-scatter variant: for each compressed
 * column i with nonzeros B[j] at indices k = ir[j]-ioff, add B[j] * A(:,i)
 * into C(:,k). ioff = jc[0] normalizes 0- or 1-based index arrays.
 *
 * Fix: removed the "#pragma omp parallel for" that was on the outer loop.
 * Unlike dmcscm (which writes only C(:,i) per iteration), here two different
 * iterations i can produce the same output index k, so concurrent
 * cblas_daxpy calls into the same column C+(k*ldc) were a data race that
 * could silently lose updates. The loop now runs serially, which is always
 * correct and deterministic. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_dmcsrm
(JNIEnv * env, jobject calling_obj, jint M, jint N, jdoubleArray j_A, jint lda,
jdoubleArray j_B, jintArray j_ir, jintArray j_jc, jdoubleArray j_C, jint ldc){
    jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
    jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
    jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE);
    jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE);
    jdouble * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE);
    int ioff = jc[0];
    int i;
    for (i = 0; i < N; i++) {
        int j, k;
        for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) {
            k = ir[j]-ioff;
            cblas_daxpy(M, B[j], A+(i*lda), 1, C+(k*ldc), 1);
        }
    }
    (*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}
/* Single-precision dot product: returns sum X[i*incX] * Y[i*incY]. */
JNIEXPORT jfloat JNICALL Java_edu_berkeley_bid_CBLAS_sdot
(JNIEnv * env, jobject calling_obj, jint N, jfloatArray jX, jint incX, jfloatArray jY, jint incY){
    jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    jfloat returnValue;
    returnValue = cblas_sdot(N, X, incX, Y, incY);
    /* release in reverse order of acquisition */
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
    return returnValue;
}
/* Like sdot, but with element start offsets into X and Y and unit stride. */
JNIEXPORT jfloat JNICALL Java_edu_berkeley_bid_CBLAS_sdotxx
(JNIEnv * env, jobject calling_obj, jint N, jfloatArray jX, jint startX, jfloatArray jY, jint startY){
    jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    jfloat returnValue;
    returnValue = cblas_sdot(N, X+startX, 1, Y+startY, 1);
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
    return returnValue;
}
/* Column-wise dot products: Z[i] = dot(X(:,i), Y(:,i)) for ncols columns
 * stored column-major with leading dimensions ldx and ldy.
 * Fix: removed the unused local 'j' (the double-precision twin ddotm
 * declares only 'i'). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_sdotm
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jfloatArray jX, jint ldx, jfloatArray jY, jint ldy, jfloatArray jZ){
    jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
    int i;
    for (i = 0; i < ncols; i++) {
        Z[i] = cblas_sdot(nrows, X+i*ldx, 1, Y+i*ldy, 1);
    }
    (*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}
/* Row-wise accumulated products: Z[j] += sum over i of X[j,i] * Y[j,i].
 * Inner row loop is parallelized; race-free since distinct j write distinct
 * Z entries. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_sdotr
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jfloatArray jX, jint ldx, jfloatArray jY, jint ldy, jfloatArray jZ){
    jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
    int i, j;
    for (i = 0; i < ncols; i++) {
        #pragma omp parallel for
        for (j = 0; j < nrows; j++) {
            Z[j] += X[j + i*ldx] * Y[j + i*ldy];
        }
    }
    (*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}
/* Single-precision matrix-vector multiply Y := alpha*op(A)*X + beta*Y. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_sgemv
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint M, jint N, jfloat alpha,
jfloatArray jA, jint lda, jfloatArray jX, jint incX, jfloat beta, jfloatArray jY, jint incY){
    jfloat * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
    jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
    jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
    cblas_sgemv((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, M, N, alpha, A, lda, X, incX, beta, Y, incY);
    (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* Single-precision matrix-matrix multiply C := alpha*op(A)*op(B) + beta*C. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_sgemm
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint transB, jint M, jint N, jint K,
jfloat alpha, jfloatArray jA, jint lda, jfloatArray jB, jint ldb, jfloat beta, jfloatArray jC, jint ldc){
    jfloat * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
    jfloat * B = (*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE);
    jfloat * C = (*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE);
    cblas_sgemm((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K,
                alpha, A, lda, B, ldb, beta, C, ldc);
    (*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* sgemm variant taking element offsets (Aoff/Boff/Coff) into each array,
 * so sub-blocks of larger JVM arrays can be multiplied without copying. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_sgemmx
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint transB, jint M, jint N, jint K,
jfloat alpha, jfloatArray jA, jint Aoff, jint lda, jfloatArray jB, jint Boff, jint ldb, jfloat beta, jfloatArray jC, jint Coff, jint ldc){
    jfloat * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
    jfloat * B = (*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE);
    jfloat * C = (*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE);
    cblas_sgemm((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K,
                alpha, A+Aoff, lda, B+Boff, ldb, beta, C+Coff, ldc);
    (*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* Single-precision out-of-place scaled copy/transpose B = alpha * op(A);
 * order and transA are one-character strings ("r"/"c", "t"/"n"). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_somatcopy
(JNIEnv * env, jobject calling_obj, jstring j_order, jstring j_transA, jint M, jint N,
jfloat alpha, jfloatArray j_A, jint lda, jfloatArray j_B, jint ldb) {
    char * order = (char *)(*env)->GetStringUTFChars(env, j_order, 0);
    char * transA = (char *)(*env)->GetStringUTFChars(env, j_transA, 0);
    jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
    jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
#ifdef __INTEL_COMPILER
    mkl_somatcopy(order[0], transA[0], M, N, alpha, A, lda, B, ldb);
#else
    /* translate the single-character flags to CBLAS enums */
    int corder = (order[0] == 'r' || order[0] == 'R') ? CblasRowMajor : CblasColMajor;
    int ctrans = (transA[0] == 't' || transA[0] == 'T') ? CblasTrans : CblasNoTrans;
    cblas_somatcopy(corder, ctrans, M, N, alpha, A, lda, B, ldb);
#endif
    (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
    (*env)->ReleaseStringUTFChars(env, j_transA, transA);
    (*env)->ReleaseStringUTFChars(env, j_order, order);
}
/* 32-bit integer out-of-place copy/transpose. Implementation trick: the jint
 * data is reinterpreted as jfloat (both 32 bits) and pushed through the
 * single-precision omatcopy with alpha = 1.0f.
 * NOTE(review): scaling reinterpreted integer bit patterns by 1.0f is not
 * guaranteed bit-preserving in general (e.g. patterns aliasing signaling
 * NaNs may be quieted) -- presumably the BLAS builds in use just move the
 * data, but worth confirming. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_iomatcopy
(JNIEnv * env, jobject calling_obj, jstring j_order, jstring j_transA, jint M, jint N,
jintArray j_A, jint lda, jintArray j_B, jint ldb) {
    char * order = (char *)(*env)->GetStringUTFChars(env, j_order, 0);
    char * transA = (char *)(*env)->GetStringUTFChars(env, j_transA, 0);
    jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
    jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
#ifdef __INTEL_COMPILER
    mkl_somatcopy(order[0], transA[0], M, N, 1.0f, A, lda, B, ldb);
#else
    int corder = (order[0] == 'r' || order[0] == 'R') ? CblasRowMajor : CblasColMajor;
    int ctrans = (transA[0] == 't' || transA[0] == 'T') ? CblasTrans : CblasNoTrans;
    cblas_somatcopy(corder, ctrans, M, N, 1.0f, A, lda, B, ldb);
#endif
    (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
    (*env)->ReleaseStringUTFChars(env, j_transA, transA);
    (*env)->ReleaseStringUTFChars(env, j_order, order);
}
/* 64-bit long out-of-place copy/transpose. Same trick as iomatcopy: jlong
 * data is reinterpreted as jdouble (both 64 bits) and pushed through the
 * double-precision omatcopy with alpha = 1.0.
 * NOTE(review): as with iomatcopy, scaling reinterpreted integer bits by 1.0
 * is presumably bit-preserving with these BLAS builds -- confirm. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_lomatcopy
(JNIEnv * env, jobject calling_obj, jstring j_order, jstring j_transA, jint M, jint N,
jlongArray j_A, jint lda, jlongArray j_B, jint ldb) {
    char * order = (char *)(*env)->GetStringUTFChars(env, j_order, 0);
    char * transA = (char *)(*env)->GetStringUTFChars(env, j_transA, 0);
    jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
    jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
#ifdef __INTEL_COMPILER
    mkl_domatcopy(order[0], transA[0], M, N, 1.0, A, lda, B, ldb);
#else
    int corder = (order[0] == 'r' || order[0] == 'R') ? CblasRowMajor : CblasColMajor;
    int ctrans = (transA[0] == 't' || transA[0] == 'T') ? CblasTrans : CblasNoTrans;
    cblas_domatcopy(corder, ctrans, M, N, 1.0, A, lda, B, ldb);
#endif
    (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
    (*env)->ReleaseStringUTFChars(env, j_transA, transA);
    (*env)->ReleaseStringUTFChars(env, j_order, order);
}
/* Transpose each of the K contiguous M-by-N column-major float slices of A
 * into the corresponding N-by-M slice of B. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_spermute
(JNIEnv * env, jobject calling_obj, jint M, jint N, jint K, jfloatArray j_A, jfloatArray j_B) {
    int slice;
    int slice_elems = M*N;            /* elements per slice */
    jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
    jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
#ifdef __INTEL_COMPILER
    for (slice = 0; slice < K; slice++) {
        mkl_somatcopy('C', 'T', M, N, 1.0f, A + slice*slice_elems, M, B + slice*slice_elems, N);
    }
#else
    for (slice = 0; slice < K; slice++) {
        cblas_somatcopy(CblasColMajor, CblasTrans, M, N, 1.0f, A + slice*slice_elems, M, B + slice*slice_elems, N);
    }
#endif
    (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}
/* Transpose each of the K M-by-N slices of a 32-bit int array, reinterpreting
 * the data as float (same width) and using somatcopy with alpha = 1.0f.
 * NOTE(review): same bit-preservation caveat as iomatcopy -- confirm. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_ipermute
(JNIEnv * env, jobject calling_obj, jint M, jint N, jint K, jintArray j_A, jintArray j_B) {
    int i, offset, step = M*N;
    jfloat * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE));
    jfloat * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE));
#ifdef __INTEL_COMPILER
    for (i = 0, offset = 0; i < K; i++, offset += step) {
        mkl_somatcopy('C', 'T', M, N, 1.0f, A+offset, M, B+offset, N);
    }
#else
    for (i = 0, offset = 0; i < K; i++, offset += step) {
        cblas_somatcopy(CblasColMajor, CblasTrans, M, N, 1.0f, A+offset, M, B+offset, N);
    }
#endif
    (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}
/* Transpose each of the K contiguous M-by-N column-major double slices of A
 * into the corresponding N-by-M slice of B. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_dpermute
(JNIEnv * env, jobject calling_obj, jint M, jint N, jint K, jdoubleArray j_A, jdoubleArray j_B) {
    int i, offset, step = M*N;
    jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
    jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
#ifdef __INTEL_COMPILER
    for (i = 0, offset = 0; i < K; i++, offset += step) {
        mkl_domatcopy('C', 'T', M, N, 1.0, A+offset, M, B+offset, N);
    }
#else
    for (i = 0, offset = 0; i < K; i++, offset += step) {
        cblas_domatcopy(CblasColMajor, CblasTrans, M, N, 1.0, A+offset, M, B+offset, N);
    }
#endif
    (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
    (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}
/*
 * Transpose each of the K contiguous MxN slices of a long array by
 * reinterpreting the data as doubles and using [mkl|cblas]_domatcopy.
 * NOTE(review): as with ipermute, multiplying reinterpreted bit patterns by
 * 1.0 is only bit-preserving for non-signalling-NaN patterns -- confirm.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_lpermute
(JNIEnv * env, jobject calling_obj, jint M, jint N, jint K, jlongArray j_A, jlongArray j_B) {
int i, offset, step = M*N;
jdouble * A = (jdouble *)((*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE));
jdouble * B = (jdouble *)((*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE));
#ifdef __INTEL_COMPILER
for (i = 0, offset = 0; i < K; i++, offset += step) {
mkl_domatcopy('C', 'T', M, N, 1.0, A+offset, M, B+offset, N);
}
#else
for (i = 0, offset = 0; i < K; i++, offset += step) {
cblas_domatcopy(CblasColMajor, CblasTrans, M, N, 1.0, A+offset, M, B+offset, N);
}
#endif
(*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}
/* Strided single-precision axpy: Y = a*X + Y. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_saxpy
(JNIEnv * env, jobject calling_obj, jint N, jfloat a, jfloatArray jX, jint incX, jfloatArray jY, jint incY){
jfloat * xbuf = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
jfloat * ybuf = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
cblas_saxpy(N, a, xbuf, incX, ybuf, incY);
(*env)->ReleasePrimitiveArrayCritical(env, jY, ybuf, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jX, xbuf, 0);
}
/* Offset variant of saxpy: Y[startY..] += a * X[startX..], unit strides. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_saxpyxx
(JNIEnv * env, jobject calling_obj, jint N, jfloat a, jfloatArray jX, jint startX, jfloatArray jY, jint startY){
jfloat * xbuf = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
jfloat * ybuf = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
cblas_saxpy(N, a, xbuf + startX, 1, ybuf + startY, 1);
(*env)->ReleasePrimitiveArrayCritical(env, jY, ybuf, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jX, xbuf, 0);
}
/*
 * Dense x sparse product: C[:,i] += sum over nonzeros of column i of the
 * sparse matrix (values B, row indices ir, column pointers jc, CSC layout)
 * of B[j] * A[:, ir[j]]. ioff = jc[0] handles 0- or 1-based index arrays.
 * The omp loop parallelizes over output columns i; each thread writes a
 * distinct column of C, so the writes do not race.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_smcscm
(JNIEnv * env, jobject calling_obj, jint M, jint N, jfloatArray j_A, jint lda,
jfloatArray j_B, jintArray j_ir, jintArray j_jc, jfloatArray j_C, jint ldc){
jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE);
jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE);
jfloat * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE);
int ioff = jc[0];
int i;
#pragma omp parallel for
for (i = 0; i < N; i++) {
int j, ir0;
for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) {
ir0 = ir[j]-ioff;
cblas_saxpy(M, B[j], A+(ir0*lda), 1, C+(i*ldc), 1);
}
}
(*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}
/*
 * Scatter product: for each column i of the sparse matrix (values B,
 * row indices ir, pointers jc), C[:, ir[j]] += B[j] * A[:, i].
 * Small M (<= 10) is handled with a hand-rolled loop, larger M via saxpy,
 * and M == 1 as a scalar update.
 * NOTE(review): the omp loop parallelizes over i, but different i can
 * scatter into the same output column jj, so concurrent += on C is possible
 * -- confirm callers tolerate this race.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_smcsrm
(JNIEnv * env, jobject calling_obj, jint M, jint N, jfloatArray j_A, jint lda,
jfloatArray j_B, jintArray j_ir, jintArray j_jc, jfloatArray j_C, jint ldc){
jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE);
jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE);
jfloat * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE);
int ioff = jc[0];
int i;
#pragma omp parallel for
for (i = 0; i < N; i++) {
int j, jj, k;
for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) {
jj = ir[j]-ioff;
if (M == 1) {
C[jj*ldc] += B[j] * A[i*lda];
} else if (M > 10) {
cblas_saxpy(M, B[j], A+(i*lda), 1, C+(jj*ldc), 1);
} else {
int iia = i*lda;
int jjc = jj*ldc;
float Bj = B[j];
for (k = 0; k < M; k++) {
C[jjc+k] += Bj * A[iia+k];
}
}
}
}
(*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}
/* Unconjugated complex dot product; result (re, im) written into Z[0..1]. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cdot
(JNIEnv * env, jobject calling_obj, jint N, jfloatArray jX, jint incX, jfloatArray jY, jint incY, jfloatArray jZ){
jfloat * xbuf = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
jfloat * ybuf = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
jfloat * zbuf = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
cblas_cdotu_sub(N, xbuf, incX, ybuf, incY, zbuf);
(*env)->ReleasePrimitiveArrayCritical(env, jY, ybuf, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jZ, zbuf, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jX, xbuf, 0);
}
/* Offset variant of cdot: Z = X[startX..] . Y[startY..] with unit strides. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cdotxx
(JNIEnv * env, jobject calling_obj, jint N, jfloatArray jX, jint startX, jfloatArray jY, jint startY, jfloatArray jZ){
jfloat * xbuf = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
jfloat * ybuf = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
jfloat * zbuf = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
cblas_cdotu_sub(N, xbuf + startX, 1, ybuf + startY, 1, zbuf);
(*env)->ReleasePrimitiveArrayCritical(env, jY, ybuf, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jZ, zbuf, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jX, xbuf, 0);
}
/*
 * Column-wise unconjugated complex dot products:
 * Z[c] = X[:,c] . Y[:,c] for c = 0..ncols-1, with interleaved (re, im)
 * storage. i steps by 2 because each complex column spans 2*ld floats and
 * each result occupies two floats in Z.
 * NOTE(review): X+i*ldx with i = 2*c assumes ldx/ldy are measured in
 * complex elements -- confirm against the Scala caller.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cdotm
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jfloatArray jX, jint ldx, jfloatArray jY, jint ldy, jfloatArray jZ){
jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
int i;
for (i=0; i<2*ncols; i+=2) {
cblas_cdotu_sub(nrows, X+i*ldx, 1, Y+i*ldy, 1, Z+i);
}
(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}
/*
 * Row-wise unconjugated complex dot products over two column-major complex
 * (interleaved re, im) matrices: for each row j,
 * Z[j] += sum over columns i of X[j,i] * Y[j,i] (complex multiply).
 * Parallelized over rows j; each j writes a distinct Z slot, so no race.
 * FIX: the original computed iy but indexed Y with ix, which is wrong
 * whenever ldy != ldx; Y is now addressed with iy (identical behavior when
 * ldx == ldy).
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cdotr
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jfloatArray jX, jint ldx, jfloatArray jY, jint ldy, jfloatArray jZ){
jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
int i, j;
for (i = 0; i < ncols; i++) {
#pragma omp parallel for
for (j = 0; j < nrows; j++) {
int ix, iy;
ix = 2*(j + i*ldx);
iy = 2*(j + i*ldy);
/* complex multiply-accumulate: Z[j] += X[j,i] * Y[j,i] */
Z[2*j] += X[ix] * Y[iy] - X[ix+1] * Y[iy+1];
Z[2*j+1] += X[ix] * Y[iy+1] + X[ix+1] * Y[iy];
}
}
(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}
/*
 * Complex GEMV: Y = alpha*op(A)*X + beta*Y. The complex scalars alpha and
 * beta arrive as 2-float Java arrays and are passed by pointer to CBLAS.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cgemv
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint M, jint N, jfloatArray jAlpha,
jfloatArray jA, jint lda, jfloatArray jX, jint incX, jfloatArray jBeta, jfloatArray jY, jint incY){
jfloat * amat = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
jfloat * xvec = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
jfloat * yvec = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
jfloat * alphap = (*env)->GetPrimitiveArrayCritical(env, jAlpha, JNI_FALSE);
jfloat * betap = (*env)->GetPrimitiveArrayCritical(env, jBeta, JNI_FALSE);
cblas_cgemv((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, M, N, alphap, amat, lda, xvec, incX, betap, yvec, incY);
(*env)->ReleasePrimitiveArrayCritical(env, jBeta, betap, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jAlpha, alphap, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jY, yvec, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jX, xvec, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, amat, 0);
}
/*
 * Complex GEMM: C = alpha*op(A)*op(B) + beta*C. alpha and beta arrive as
 * 2-float Java arrays.
 * FIX: the original never released the alpha/beta critical arrays, leaving
 * two JNI critical regions open (pins the arrays and can stall the GC);
 * they are now released, matching cgemv above.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cgemm
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint transB, jint M, jint N, jint K,
jfloatArray jAlpha, jfloatArray jA, jint lda, jfloatArray jB, jint ldb, jfloatArray jBeta, jfloatArray jC, jint ldc){
jfloat * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
jfloat * B = (*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE);
jfloat * C = (*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE);
jfloat * alpha = (*env)->GetPrimitiveArrayCritical(env, jAlpha, JNI_FALSE);
jfloat * beta = (*env)->GetPrimitiveArrayCritical(env, jBeta, JNI_FALSE);
cblas_cgemm((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K,
alpha, A, lda, B, ldb, beta, C, ldc);
(*env)->ReleasePrimitiveArrayCritical(env, jBeta, beta, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jAlpha, alpha, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* Complex axpy: Y += a*X, complex scalar a passed as a 2-float array. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_caxpy
(JNIEnv * env, jobject calling_obj, jint N, jfloatArray jA, jfloatArray jX, jint incX,
jfloatArray jY, jint incY){
jfloat * xbuf = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
jfloat * ybuf = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
jfloat * coef = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
cblas_caxpy(N, coef, xbuf, incX, ybuf, incY);
(*env)->ReleasePrimitiveArrayCritical(env, jA, coef, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jY, ybuf, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jX, xbuf, 0);
}
/* Offset variant of caxpy: Y[startY..] += a * X[startX..], unit strides. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_caxpyxx
(JNIEnv * env, jobject calling_obj, jint N, jfloatArray jA, jfloatArray jX, jint startX, jfloatArray jY, jint startY){
jfloat * xbuf = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
jfloat * ybuf = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
jfloat * coef = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
cblas_caxpy(N, coef, xbuf + startX, 1, ybuf + startY, 1);
(*env)->ReleasePrimitiveArrayCritical(env, jA, coef, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jY, ybuf, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jX, xbuf, 0);
}
/*
 * Batched single-precision GEMM: reps sub-problems at strides astep/bstep/
 * cstep starting at offsets aoff/boff/coff inside jA/jB/jC.
 * FIX: the original advanced A/B/C in place and then passed the advanced
 * pointers to ReleasePrimitiveArrayCritical; the JNI spec requires the exact
 * pointer returned by GetPrimitiveArrayCritical. Separate cursors are used
 * now so the base pointers are released.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_blockSgemm
(JNIEnv *env, jobject obj, jint transA, jint transB, jint nr, jint nc, jint kk, jfloat alpha, jfloatArray jA, jint aoff, jint lda, jint astep,
jfloatArray jB, jint boff, jint ldb, jint bstep, jfloat beta, jfloatArray jC, jint coff, jint ldc, jint cstep, jint reps)
{
int i, at, bt;
jfloat *A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
jfloat *B = (*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE);
jfloat *C = (*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE);
jfloat *pA, *pB, *pC;
at = (transA) ? CblasTrans : CblasNoTrans;
bt = (transB) ? CblasTrans : CblasNoTrans;
pA = A + aoff;
pB = B + boff;
pC = C + coff;
for (i = 0; i < reps; i++) {
cblas_sgemm(CblasColMajor, at, bt, nr, nc, kk, alpha, pA, lda, pB, ldb, beta, pC, ldc);
pA += astep;
pB += bstep;
pC += cstep;
}
/* Release with the original base pointers. */
(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/*
 * Batched double-precision GEMM (see blockSgemm).
 * FIX: release the original base pointers rather than the cursors that were
 * advanced through the batch -- JNI requires the pointer returned by
 * GetPrimitiveArrayCritical.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_blockDgemm
(JNIEnv *env, jobject obj, jint transA, jint transB, jint nr, jint nc, jint kk, jdouble alpha, jdoubleArray jA, jint aoff, jint lda, jint astep,
jdoubleArray jB, jint boff, jint ldb, jint bstep, jdouble beta, jdoubleArray jC, jint coff, jint ldc, jint cstep, jint reps)
{
int i, at, bt;
jdouble *A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
jdouble *B = (*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE);
jdouble *C = (*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE);
jdouble *pA, *pB, *pC;
at = (transA) ? CblasTrans : CblasNoTrans;
bt = (transB) ? CblasTrans : CblasNoTrans;
pA = A + aoff;
pB = B + boff;
pC = C + coff;
for (i = 0; i < reps; i++) {
cblas_dgemm(CblasColMajor, at, bt, nr, nc, kk, alpha, pA, lda, pB, ldb, beta, pC, ldc);
pA += astep;
pB += bstep;
pC += cstep;
}
/* Release with the original base pointers. */
(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/*
 * Batched SGEMM over a reps1 x reps2 grid of sub-blocks; sub-block (j, i)
 * lives at offset off + j*step1 + i*step2 inside each array.
 * FIX: the original did A += aoff (etc.) and then released the offset
 * pointers; JNI requires the exact pointer returned by
 * GetPrimitiveArrayCritical. Offsets are now folded into the per-block
 * cursor computation instead.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_blockSgemm4D
(JNIEnv *env, jobject obj, jint transA, jint transB, jint nr, jint nc, jint kk, jfloat alpha,
jfloatArray jA, jint aoff, jint lda, jint astep1, jint astep2,
jfloatArray jB, jint boff, jint ldb, jint bstep1, jint bstep2, jfloat beta,
jfloatArray jC, jint coff, jint ldc, jint cstep1, jint cstep2, jint reps1, jint reps2)
{
int i, j, at, bt;
float *A = (float *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
float *B = (float *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
float *C = (float *)((*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE));
float *pA, *pB, *pC;
at = (transA) ? CblasTrans : CblasNoTrans;
bt = (transB) ? CblasTrans : CblasNoTrans;
for (i = 0; i < reps2; i++) {
for (j = 0; j < reps1; j++) {
pA = A + aoff + (j * astep1 + i * astep2);
pB = B + boff + (j * bstep1 + i * bstep2);
pC = C + coff + (j * cstep1 + i * cstep2);
cblas_sgemm(CblasColMajor, at, bt, nr, nc, kk, alpha, pA, lda, pB, ldb, beta, pC, ldc);
}
}
/* Release with the original base pointers. */
(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/*
 * Batched DGEMM over a reps1 x reps2 grid of sub-blocks (see blockSgemm4D).
 * FIX: release the original base pointers; the per-block offsets are folded
 * into the cursor computation instead of mutating A/B/C.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_blockDgemm4D
(JNIEnv *env, jobject obj, jint transA, jint transB, jint nr, jint nc, jint kk, jdouble alpha,
jdoubleArray jA, jint aoff, jint lda, jint astep1, jint astep2,
jdoubleArray jB, jint boff, jint ldb, jint bstep1, jint bstep2, jdouble beta,
jdoubleArray jC, jint coff, jint ldc, jint cstep1, jint cstep2, jint reps1, jint reps2)
{
int i, j, at, bt;
double *A = (double *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
double *B = (double *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
double *C = (double *)((*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE));
double *pA, *pB, *pC;
at = (transA) ? CblasTrans : CblasNoTrans;
bt = (transB) ? CblasTrans : CblasNoTrans;
for (i = 0; i < reps2; i++) {
for (j = 0; j < reps1; j++) {
pA = A + aoff + (j * astep1 + i * astep2);
pB = B + boff + (j * bstep1 + i * bstep2);
pC = C + coff + (j * cstep1 + i * cstep2);
cblas_dgemm(CblasColMajor, at, bt, nr, nc, kk, alpha, pA, lda, pB, ldb, beta, pC, ldc);
}
}
/* Release with the original base pointers. */
(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/*
 * word2vec forward pass: for each column (sample) i, compute the dot
 * products of the nwa "A" word vectors against the nwb "B" word vectors:
 * C[j + nwa*(k + nwb*i)] = A[:, WA[j,i]] . B[:, WB[k,i]].
 * WA/WB hold word indices selecting nrows-long columns of A and B.
 * Parallel over columns i; each thread writes a distinct slab of C.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_word2vecFwd
(JNIEnv *env, jobject obj, jint nrows, jint ncols, const jint nwa, const jint nwb, jintArray jWA, jintArray jWB,
jfloatArray jA, jfloatArray jB, jfloatArray jC)
{
jint * WA = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWA, JNI_FALSE));
jint * WB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWB, JNI_FALSE));
jfloat * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
jfloat * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
jfloat * C = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE));
int i;
#pragma omp parallel for
for (i = 0; i < ncols; i++) {
int j, k, c, ia, ib, coff;
float sum;
for (j = 0; j < nwa; j++) {
ia = nrows*WA[j+i*nwa];
for (k = 0; k < nwb; k++) {
ib = nrows*WB[k+i*nwb];
sum = 0;
for (c = 0; c < nrows; c++) {
sum += A[c + ia] * B[c + ib];
}
coff = nwa * (k + nwb * i);
C[j + coff] = sum;
}
}
}
(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jWB, WB, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jWA, WA, 0);
}
/*
 * word2vec backward pass: with per-pair coefficients C scaled by lrate,
 * A[:, WA[j,i]] += lrate*C[j + nwa*(k + nwb*i)] * DB[:, WB[k,i]] and
 * B[:, WB[k,i]] += lrate*C[j + nwa*(k + nwb*i)] * DA[:, WA[j,i]].
 * NOTE(review): the same word index can occur in several columns i, so the
 * parallel loop can race on A/B updates (hogwild-style SGD) -- presumably
 * accepted; confirm.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_word2vecBwd
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint nwa, jint nwb, jintArray jWA, jintArray jWB,
jfloatArray jA, jfloatArray jB, jfloatArray jDA, jfloatArray jDB, jfloatArray jC, jfloat lrate)
{
jint * WA = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWA, JNI_FALSE));
jint * WB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWB, JNI_FALSE));
jfloat * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
jfloat * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
jfloat * DA = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jDA, JNI_FALSE));
jfloat * DB = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jDB, JNI_FALSE));
jfloat * C = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE));
int i;
#pragma omp parallel for
for (i = 0; i < ncols; i++) {
int j, k, c;
float cv;
int ia, ib;
/* Update the A word vectors. */
for (j = 0; j < nwa; j++) {
ia = nrows*WA[j+i*nwa];
for (k = 0; k < nwb; k++) {
ib = nrows*WB[k+i*nwb];
cv = lrate * C[j + nwa * (k + nwb * i)];
for (c = 0; c < nrows; c++) {
A[c + ia] += cv * DB[c + ib];
}
}
}
/* Update the B word vectors. */
for (k = 0; k < nwb; k++) {
ib = nrows*WB[k+i*nwb];
for (j = 0; j < nwa; j++) {
ia = nrows*WA[j+i*nwa];
cv = lrate * C[j + nwa * (k + nwb * i)];
for (c = 0; c < nrows; c++) {
B[c + ib] += cv * DA[c + ia];
}
}
}
}
(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jDB, DB, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jDA, DA, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jWB, WB, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jWA, WA, 0);
}
/*
 * Min/max helper macros used by the reduceTensor* functions.
 * FIX: arguments are now fully parenthesized; the original expanded A>B
 * unparenthesized, which mis-parses for operands built from operators with
 * lower precedence than '>' (e.g. mymax(0, x ? y : z)).
 * NOTE: arguments are still evaluated twice -- avoid side effects in A/B.
 */
#define mymax(A, B) (((A) > (B)) ? (A) : (B))
#define mymin(A, B) (((A) < (B)) ? (A) : (B))
/*
 * Reduce the middle dimension of an m x n x p float tensor A (column-major,
 * element (k, j, i) at index k + m*(j + n*i)) into the m x p array B.
 * op selects the reduction: 0 = sum, 1 = product, 2 = max, 3 = min; any
 * other op leaves B holding the j == 0 slice.
 * NOTE(review): assumes n >= 1 -- the initial copy reads the j == 0 slice
 * unconditionally; confirm callers never pass n == 0.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_reduceTensorFloat
(JNIEnv *env, jobject obj, jfloatArray jA, jfloatArray jB, jint m, jint n, jint p, jint op)
{
jfloat * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
jfloat * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
int i, j;
/* Initialize B with the j == 0 slice. */
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = A[k + m * n * i];
}
}
/* Fold in the remaining slices j = 1..n-1; parallel over the i axis. */
for (j = 1; j < n; j++) {
switch (op) {
case 0 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = B[k + m * i] + A[k + m * (j + n * i)];
}
}
}
break;
case 1 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = B[k + m * i] * A[k + m * (j + n * i)];
}
}
}
break;
case 2 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = mymax(B[k + m * i], A[k + m * (j + n * i)]);
}
}
}
break;
case 3 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = mymin(B[k + m * i], A[k + m * (j + n * i)]);
}
}
}
break;
default: {}
}
}
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/*
 * Double variant of reduceTensorFloat: reduce the middle dimension of an
 * m x n x p tensor A into the m x p array B.
 * op: 0 = sum, 1 = product, 2 = max, 3 = min; other values leave the
 * j == 0 slice in B. Assumes n >= 1.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_reduceTensorDouble
(JNIEnv *env, jobject obj, jdoubleArray jA, jdoubleArray jB, jint m, jint n, jint p, jint op)
{
jdouble * A = (jdouble *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
jdouble * B = (jdouble *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
int i, j;
/* Initialize B with the j == 0 slice. */
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = A[k + m * n * i];
}
}
/* Fold in slices j = 1..n-1; parallel over the i axis. */
for (j = 1; j < n; j++) {
switch (op) {
case 0 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] += A[k + m * (j + n * i)];
}
}
}
break;
case 1 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] *= A[k + m * (j + n * i)];
}
}
}
break;
case 2 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = mymax(B[k + m * i], A[k + m * (j + n * i)]);
}
}
}
break;
case 3 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = mymin(B[k + m * i], A[k + m * (j + n * i)]);
}
}
}
break;
default: {}
}
}
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/*
 * Int variant of reduceTensorFloat: reduce the middle dimension of an
 * m x n x p tensor A into the m x p array B.
 * op: 0 = sum, 1 = product, 2 = max, 3 = min; other values leave the
 * j == 0 slice in B. Assumes n >= 1.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_reduceTensorInt
(JNIEnv *env, jobject obj, jintArray jA, jintArray jB, jint m, jint n, jint p, jint op)
{
jint * A = (jint *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
jint * B = (jint *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
int i, j;
/* Initialize B with the j == 0 slice. */
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = A[k + m * n * i];
}
}
/* Fold in slices j = 1..n-1; parallel over the i axis. */
for (j = 1; j < n; j++) {
switch (op) {
case 0 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] += A[k + m * (j + n * i)];
}
}
}
break;
case 1 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] *= A[k + m * (j + n * i)];
}
}
}
break;
case 2 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = mymax(B[k + m * i], A[k + m * (j + n * i)]);
}
}
}
break;
case 3 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = mymin(B[k + m * i], A[k + m * (j + n * i)]);
}
}
}
break;
default: {}
}
}
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/*
 * Long variant of reduceTensorFloat: reduce the middle dimension of an
 * m x n x p tensor A into the m x p array B.
 * op: 0 = sum, 1 = product, 2 = max, 3 = min; other values leave the
 * j == 0 slice in B. Assumes n >= 1.
 */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_reduceTensorLong
(JNIEnv *env, jobject obj, jlongArray jA, jlongArray jB, jint m, jint n, jint p, jint op)
{
jlong * A = (jlong *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
jlong * B = (jlong *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
int i, j;
/* Initialize B with the j == 0 slice. */
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = A[k + m * n * i];
}
}
/* Fold in slices j = 1..n-1; parallel over the i axis. */
for (j = 1; j < n; j++) {
switch (op) {
case 0 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] += A[k + m * (j + n * i)];
}
}
}
break;
case 1 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] *= A[k + m * (j + n * i)];
}
}
}
break;
case 2 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = mymax(B[k + m * i], A[k + m * (j + n * i)]);
}
}
}
break;
case 3 : {
#pragma omp parallel for
for (i = 0; i < p; i++) {
int k;
for (k = 0; k < m; k++) {
B[k + m * i] = mymin(B[k + m * i], A[k + m * (j + n * i)]);
}
}
}
break;
default: {}
}
}
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
|
testingPolicies.h | #pragma once
#include <iostream>
#include "timeHandler.h"
#include "datatypes.h"
#include "cxxopts.hpp"
#include "operators.h"
#include "locationTypesFormat.h"
#include "smallTools.h"
// Null-object testing policy: satisfies the testing-policy interface used by
// Simulation but never tests any agent.
template<typename SimulationType>
class NoTesting {
public:
// add program parameters if we need any, this function got called already from Simulation
static void addProgramParameters(cxxopts::Options& options) {}
// No CLI arguments to consume.
void initializeArgs(const cxxopts::ParseResult& result) {}
// No location-type data needed.
void init(const parser::LocationTypes& data) {}
// No-op: no tests are ever performed.
void performTests(Timehandler simTime, unsigned timeStep) {}
// All-zero stats tuple (nothing is ever tested).
auto getStats() { return thrust::make_tuple(0u, 0u, 0u); }
};
namespace DetailedTestingOps {
// Argument pack for the detailed-testing kernels (flagLocations/doTesting):
// plain pointers and scalars so the struct can be copied by value into a
// CUDA kernel launch.
template<typename PPState, typename LocationType>
struct TestingArguments {
HD TestingArguments() {}
PPState* agentStatesPtr; // per-agent disease state
AgentStats* agentStatsPtr; // per-agent timestamps/statistics
unsigned long* locationOffsetPtr; // offsets into possibleLocationsPtr per agent
unsigned* possibleLocationsPtr; // candidate location ids per agent
unsigned* possibleTypesPtr; // type of each candidate location
unsigned* locationQuarantineUntilPtr; // per-location quarantine end (0 = never quarantined)
unsigned hospitalType;
unsigned homeType;
unsigned publicPlaceType;
unsigned doctorType;
unsigned schoolType;
unsigned classroomType;
unsigned nurseryhomeType;
unsigned workType;
unsigned timeStep; // minutes per simulation step (used as 24*60/timeStep)
unsigned timestamp; // current simulation time, in steps
unsigned tracked; // agent id for which diagnostics are printed
LocationType* locationTypePtr; // type of every location
unsigned* lastTestPtr; // per-agent timestamp of the most recent test
bool* locationFlagsPtr; // locations flagged for boosted testing
bool* diagnosedPtr; // per-agent "currently diagnosed" flag
double testingRandom; // baseline testing probability
double testingHome; // bonus when home is flagged
double testingWork; // bonus when workplace is flagged
double testingSchool; // bonus when school is flagged (classroom: 3x)
double testingRandomHospital; // bonus for hospital/doctor workers and inpatients
double testingNurseryHome; // bonus for nursery-home workers
unsigned testingDelay; // minimum days between two tests of an agent
unsigned quarantineLength; // quarantine length, in days
bool usePCR; // choose PCR vs antigen test accuracy
};
// For every agent i diagnosed within the past 24 hours, flag the agent's
// home, workplace, school and classroom in locationFlagsPtr so doTesting can
// boost testing probability at those locations. Work/school/classroom are
// flagged only if not under a pre-existing quarantine (unless that
// quarantine began within the last 24 hours or has already ended).
// NOTE(review): a.timestamp - 24 * 60 / a.timeStep underflows (unsigned)
// during the first simulated day -- presumably diagnosedTimestamp then
// compares as smaller anyway; confirm.
template<typename PPState, typename LocationType>
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
__device__
#endif
void
flagLocations(unsigned i, TestingArguments<PPState, LocationType>& a) {
// If diagnosed in the last 24 hours
if (a.agentStatsPtr[i].diagnosedTimestamp > a.timestamp - 24 * 60 / a.timeStep) {
// Mark home
unsigned home = RealMovementOps::findActualLocationForType(i,
a.homeType,
a.locationOffsetPtr,
a.possibleLocationsPtr,
a.possibleTypesPtr,
a.homeType,
a.schoolType,
a.workType,
0,
nullptr);
if (home != std::numeric_limits<unsigned>::max()) a.locationFlagsPtr[home] = true;
// Mark work
unsigned work = RealMovementOps::findActualLocationForType(i,
a.workType,
a.locationOffsetPtr,
a.possibleLocationsPtr,
a.possibleTypesPtr,
a.homeType,
a.schoolType,
a.workType,
0,
nullptr);
if (work != std::numeric_limits<unsigned>::max()
&& (a.locationQuarantineUntilPtr[work] == 0 ||// Should test if it was not quarantined, OR
(a.locationQuarantineUntilPtr[work] != 0
&&// It has been quarantined - either in last 24 hours, OR it's already over
(a.locationQuarantineUntilPtr[work] - a.quarantineLength * 24 * 60 / a.timeStep
>= a.timestamp - 24 * 60 / a.timeStep
|| a.locationQuarantineUntilPtr[work] < a.timestamp))))
a.locationFlagsPtr[work] = true;
// Mark school
unsigned school = RealMovementOps::findActualLocationForType(i,
a.schoolType,
a.locationOffsetPtr,
a.possibleLocationsPtr,
a.possibleTypesPtr,
a.homeType,
a.schoolType,
a.workType,
0,
nullptr);
unsigned classroom = std::numeric_limits<unsigned>::max();
if (school != std::numeric_limits<unsigned>::max()
&& (a.locationQuarantineUntilPtr[school] == 0 ||// Should test if it was not quarantined, OR
(a.locationQuarantineUntilPtr[school] != 0
&&// It has been quarantined - either in last 24 hours, OR it's already over
(a.locationQuarantineUntilPtr[school] - a.quarantineLength * 24 * 60 / a.timeStep
>= a.timestamp - 24 * 60 / a.timeStep
|| a.locationQuarantineUntilPtr[school] < a.timestamp)))) {
a.locationFlagsPtr[school] = true;
// Mark classroom too
classroom = RealMovementOps::findActualLocationForType(i,
a.classroomType,
a.locationOffsetPtr,
a.possibleLocationsPtr,
a.possibleTypesPtr,
a.homeType,
a.schoolType,
a.workType,
0,
nullptr);
if (classroom != std::numeric_limits<unsigned>::max()
&& (a.locationQuarantineUntilPtr[classroom] == 0 ||// Should test if it was not quarantined, OR
(a.locationQuarantineUntilPtr[classroom] != 0
&&// It has been quarantined - either in last 24 hours, OR it's already over
(a.locationQuarantineUntilPtr[classroom] - a.quarantineLength * 24 * 60 / a.timeStep
>= a.timestamp - 24 * 60 / a.timeStep
|| a.locationQuarantineUntilPtr[classroom] < a.timestamp))))
a.locationFlagsPtr[classroom] = true;
}
// Diagnostics for the tracked agent only.
if (a.tracked == i) {
printf("Testing: Agent %d was diagnosed in last 24 hours, marking home %d, work %d school %d classroom %d\n",
i,
home,
work == std::numeric_limits<unsigned>::max() ? -1 : (int)work,
school == std::numeric_limits<unsigned>::max() ? -1 : (int)school,
classroom == std::numeric_limits<unsigned>::max() ? -1 : (int)classroom);
}
}
}
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// CUDA launch wrapper: one thread per agent, bounds-checked.
template<typename PPState, typename LocationType>
__global__ void flagLocationsDriver(TestingArguments<PPState, LocationType> a, unsigned numberOfAgents) {
unsigned i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < numberOfAgents) { DetailedTestingOps::flagLocations(i, a); }
}
#endif
// Decide whether agent i gets tested at this time step and apply the result.
// The testing probability is the random baseline plus bonuses for flagged
// home/work/school/classroom, hospital/doctor/nursery-home employment, and
// current (non-COVID) hospitalization. A positive test sets diagnosedPtr and
// diagnosedTimestamp; a negative test may end the agent's quarantine early
// when their home is not quarantined. Dead or already-diagnosed agents and
// agents tested within the last testingDelay days are skipped.
template<typename PPState, typename LocationType>
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
__device__
#endif
void
doTesting(unsigned i, TestingArguments<PPState, LocationType>& a) {
// if recently tested, don't test again
if (a.timestamp > a.testingDelay * 24 * 60 / a.timeStep && a.lastTestPtr[i] != std::numeric_limits<unsigned>::max()
&& a.lastTestPtr[i] > a.timestamp - a.testingDelay * 24 * 60 / a.timeStep)
return;
// Skip the dead and the already diagnosed.
if (a.agentStatesPtr[i].getWBState() == states::WBStates::D || a.diagnosedPtr[i]) return;
// Check home
unsigned home = RealMovementOps::findActualLocationForType(i,
a.homeType,
a.locationOffsetPtr,
a.possibleLocationsPtr,
a.possibleTypesPtr,
a.homeType,
a.schoolType,
a.workType,
0,
nullptr);
bool homeFlag = false;
if (home != std::numeric_limits<unsigned>::max()) homeFlag = a.locationFlagsPtr[home];
// Check work
unsigned work = RealMovementOps::findActualLocationForType(i,
a.workType,
a.locationOffsetPtr,
a.possibleLocationsPtr,
a.possibleTypesPtr,
a.homeType,
a.schoolType,
a.workType,
0,
nullptr);
bool workFlag = false;
if (work != std::numeric_limits<unsigned>::max()) workFlag = a.locationFlagsPtr[work];
// Check school
unsigned school = RealMovementOps::findActualLocationForType(i,
a.schoolType,
a.locationOffsetPtr,
a.possibleLocationsPtr,
a.possibleTypesPtr,
a.homeType,
a.schoolType,
a.workType,
0,
nullptr);
unsigned classroom = std::numeric_limits<unsigned>::max();
bool schoolFlag = false;
bool classroomFlag = false;
if (school != std::numeric_limits<unsigned>::max()) {
schoolFlag = a.locationFlagsPtr[school];
classroom = RealMovementOps::findActualLocationForType(i,
a.classroomType,
a.locationOffsetPtr,
a.possibleLocationsPtr,
a.possibleTypesPtr,
a.homeType,
a.schoolType,
a.workType,
0,
nullptr);
// NOTE(review): set to true whenever a classroom exists, unlike
// schoolFlag which reads locationFlagsPtr -- looks like it should be
// classroomFlag = a.locationFlagsPtr[classroom]; confirm intent.
if (classroom != std::numeric_limits<unsigned>::max()) classroomFlag = true;
}
// Accumulate the per-agent testing probability.
double testingProbability = a.testingRandom;
testingProbability += homeFlag * a.testingHome;
testingProbability += workFlag * a.testingWork;
testingProbability += schoolFlag * a.testingSchool;
testingProbability += classroomFlag * 3.0 * a.testingSchool;
// If agent works in hospital or doctor's office
if (work != std::numeric_limits<unsigned>::max()
&& (a.locationTypePtr[work] == a.doctorType || a.locationTypePtr[work] == a.hospitalType)) {
testingProbability += a.testingRandomHospital;
}
// If agent works in nursery home
if (work != std::numeric_limits<unsigned>::max() && a.locationTypePtr[work] == a.nurseryhomeType) {
testingProbability += a.testingNurseryHome;
}
// If agent is hospitalized for non-COVID
if (a.agentStatsPtr[i].hospitalizedTimestamp <= a.timestamp
&& a.agentStatsPtr[i].hospitalizedUntilTimestamp > a.timestamp) {
testingProbability += a.testingRandomHospital;
}
if (a.tracked == i && testingProbability > 0.0)
printf("Testing: Agent %d testing probability: %g\n", i, testingProbability);
// Do the test
if (testingProbability > 1.0 || RandomGenerator::randomReal(1.0) < testingProbability) {
a.lastTestPtr[i] = a.timestamp;
if (a.agentStatesPtr[i].isInfected()) {
// Test sensitivity depends on the configured test type.
float probability = a.usePCR ? a.agentStatesPtr[i].getAccuracyPCR() : a.agentStatesPtr[i].getAccuracyAntigen();
if (probability > RandomGenerator::randomReal(1.0)) {
a.diagnosedPtr[i] = true;
a.agentStatsPtr[i].diagnosedTimestamp = a.timestamp;
if (a.tracked == i) printf("\t Agent %d tested positive\n", i);
} else {
if (a.tracked == i) printf("\t Agent %d tested FALSE negative\n", i);
}
} else {
// Release from quarantine if home is not quarantined
if (a.agentStatsPtr[i].quarantinedUntilTimestamp > a.timestamp
&& (home != std::numeric_limits<unsigned>::max() && a.locationQuarantineUntilPtr[home] < a.timestamp)) {
// Reduce number of days spent in quarantine
if (a.agentStatsPtr[i].daysInQuarantine > 0)
a.agentStatsPtr[i].daysInQuarantine -=
(a.agentStatsPtr[i].quarantinedUntilTimestamp - a.timestamp) / (24 * 60 / a.timeStep);
// End quarantine
a.agentStatsPtr[i].quarantinedUntilTimestamp =
a.timestamp;// a.quarantinedPtr will be cleared by next movementPolicy
}
if (a.tracked == i) printf("\t Agent %d tested negative\n", i);
}
} else if (testingProbability > 0.0) {
if (a.tracked == i) printf("\t Agent %d was not tested\n", i);
}
}
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// CUDA launch wrapper: one thread per agent, bounds-checked.
template<typename PPState, typename LocationType>
__global__ void doTestingDriver(TestingArguments<PPState, LocationType> a, unsigned numberOfAgents) {
unsigned i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < numberOfAgents) { DetailedTestingOps::doTesting(i, a); }
}
#endif
}// namespace DetailedTestingOps
// Testing policy (CRTP mixin: SimulationType derives from this class).
// Each simulated day it flags locations where someone was diagnosed the day
// before, then tests agents with context-dependent probabilities
// (random/home/work/school/hospital/nursery home) on either CPU (OpenMP) or
// GPU (CUDA), and finally gathers daily testing statistics.
template<typename SimulationType>
class DetailedTesting {
    // Location-type IDs, filled in by init() from the parsed location-type table.
    unsigned publicSpace;
    unsigned home;
    unsigned hospital;
    unsigned doctor;
    unsigned tracked;// index of the agent traced in debug printouts
    unsigned quarantineLength;// in days
    unsigned school;
    unsigned classroom;
    unsigned nurseryhome;
    unsigned work;
    // (tests performed today, positives from this policy, positives from doctor/hospital visits)
    thrust::tuple<unsigned, unsigned, unsigned> stats;
    thrust::device_vector<unsigned> lastTest;// per-agent timestamp of the last test taken
    thrust::device_vector<bool> locationFlags;// per-location: was someone here diagnosed yesterday?
    // Default testing probabilities; overridden by initializeArgs().
    double testingRandom = 0.005;
    double testingHome = 0.2;
    double testingWork = 0.1;
    double testingSchool = 0.1;
    double testingRandomHospital = 0.2;
    double testingNurseryHome = 0.3;
    unsigned testingDelay = 5;// minimum days between two tests of the same agent
    bool usePCR = true;// which accuracy model applies: PCR vs. antigen

public:
    // add program parameters if we need any, this function got called already from Simulation
    static void addProgramParameters(cxxopts::Options& options) {
        options.add_options()("testingProbabilities",
            "Testing probabilities for random, if someone else was diagnosed at home/work/school, and random for hospital "
            "workers: comma-delimited string random,home,work,school,hospital,nurseryHome",
            cxxopts::value<std::string>()->default_value("0.00005,0.01,0.0005,0.0005,0.005,0.05"))("testingRepeatDelay",
            "Minimum number of days between taking tests",
            cxxopts::value<unsigned>()->default_value(std::to_string(unsigned(5))))("testingMethod",
            "default method for testing. Can be PCR (default) on antigen. Accuracies are provided in progression json input",
            cxxopts::value<std::string>()->default_value("PCR"));
    }
    // Parse the command-line options registered above into member fields.
    void initializeArgs(const cxxopts::ParseResult& result) {
        testingDelay = result["testingRepeatDelay"].as<unsigned>();
        std::string probsString = result["testingProbabilities"].as<std::string>();
        std::vector<double> params = splitStringDouble(probsString, ',');
        // Values missing from the comma-list keep their compiled-in defaults.
        if (params.size() > 0) testingRandom = params[0];
        if (params.size() > 1) testingHome = params[1];
        if (params.size() > 2) testingWork = params[2];
        if (params.size() > 3) testingSchool = params[3];
        if (params.size() > 4) testingRandomHospital = params[4];
        if (params.size() > 5) testingNurseryHome = params[5];
        // printf("testing probabilities: %g %g %g %g %g\n", testingRandom, testingHome, testingWork, testingSchool,
        // testingRandomHospital);
        // "quarantineLength" is registered elsewhere; fall back to 14 days if absent.
        try {
            quarantineLength = result["quarantineLength"].as<unsigned>();
        } catch (std::exception& e) { quarantineLength = 14; }
        if (result["testingMethod"].as<std::string>().compare("PCR") == 0)
            usePCR = true;
        else if (result["testingMethod"].as<std::string>().compare("antigen") == 0)
            usePCR = false;
        else
            throw CustomErrors(
                "unrecognized testingMethod " + result["testingMethod"].as<std::string>() + " must be either PCR or antigen");
    }
    // Daily statistics tuple computed by the last performTests() call.
    auto getStats() { return stats; }
    // Cache the numeric IDs of the relevant location types.
    void init(const parser::LocationTypes& data) {
        publicSpace = data.publicSpace;
        home = data.home;
        hospital = data.hospital;
        doctor = data.doctor;
        school = data.school;
        work = data.work;
        classroom = data.classroom;
        nurseryhome = data.nurseryhome;
    }
    // Run one day's testing round: flag locations, test agents, collect stats.
    void performTests(Timehandler simTime, unsigned timeStep) {
        // PROFILE_FUNCTION();
        auto realThis = static_cast<SimulationType*>(this);
        DetailedTestingOps::TestingArguments<typename SimulationType::PPState_t, typename SimulationType::TypeOfLocation_t> a;
        thrust::device_vector<unsigned>& agentLocations = realThis->agents->location;
        unsigned numberOfLocations = realThis->locs->locType.size();
        unsigned numberOfAgents = agentLocations.size();
        a.timestamp = simTime.getTimestamp();
        // Running for the first time - initialize arrays
        if (lastTest.size() == 0) {
            lastTest.resize(numberOfAgents);
            thrust::fill(lastTest.begin(), lastTest.end(), std::numeric_limits<unsigned>::max());
            locationFlags.resize(numberOfLocations);
            tracked = realThis->locs->tracked;
        }
        // Set all flags of all locations to false (no recent diagnoses)
        thrust::fill(locationFlags.begin(), locationFlags.end(), false);
        a.tracked = tracked;
        a.locationFlagsPtr = thrust::raw_pointer_cast(locationFlags.data());
        a.lastTestPtr = thrust::raw_pointer_cast(lastTest.data());
        a.hospitalType = hospital;
        a.homeType = home;
        a.publicPlaceType = publicSpace;
        a.doctorType = doctor;
        a.timeStep = timeStep;
        a.schoolType = school;
        a.classroomType = classroom;
        a.workType = work;
        // NOTE(review): 'nurseryhome' is cached in init() but never copied into
        // 'a' here, while testingNurseryHome is — confirm TestingArguments does
        // not need a nurseryhomeType field.
        a.testingHome = testingHome;
        a.testingWork = testingWork;
        a.testingSchool = testingSchool;
        a.testingRandomHospital = testingRandomHospital;
        a.testingRandom = testingRandom;
        a.testingDelay = testingDelay;
        a.quarantineLength = quarantineLength;
        a.testingNurseryHome = testingNurseryHome;
        a.usePCR = usePCR;
        // agent data
        thrust::device_vector<AgentStats>& agentStats = realThis->agents->agentStats;
        a.agentStatsPtr = thrust::raw_pointer_cast(agentStats.data());
        thrust::device_vector<typename SimulationType::PPState_t>& agentStates = realThis->agents->PPValues;
        a.agentStatesPtr = thrust::raw_pointer_cast(agentStates.data());
        thrust::device_vector<bool>& diagnosed = realThis->agents->diagnosed;
        a.diagnosedPtr = thrust::raw_pointer_cast(diagnosed.data());
        // primary location types
        thrust::device_vector<typename SimulationType::TypeOfLocation_t>& locationTypes = realThis->locs->locType;
        a.locationTypePtr = thrust::raw_pointer_cast(locationTypes.data());
        // Arrays storing actual location IDs for each agent, for each location type
        thrust::device_vector<unsigned long>& locationOffset = realThis->agents->locationOffset;
        a.locationOffsetPtr = thrust::raw_pointer_cast(locationOffset.data());
        thrust::device_vector<unsigned>& possibleLocations = realThis->agents->possibleLocations;
        a.possibleLocationsPtr = thrust::raw_pointer_cast(possibleLocations.data());
        thrust::device_vector<unsigned>& possibleTypes = realThis->agents->possibleTypes;
        a.possibleTypesPtr = thrust::raw_pointer_cast(possibleTypes.data());
        thrust::device_vector<unsigned>& locationQuarantineUntil = realThis->locs->quarantineUntil;
        a.locationQuarantineUntilPtr = thrust::raw_pointer_cast(locationQuarantineUntil.data());
        //
        // Step 1 - flag locations of anyone diagnosed yesterday
        //
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_OMP
#pragma omp parallel for
        for (unsigned i = 0; i < numberOfAgents; i++) { DetailedTestingOps::flagLocations(i, a); }
#elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
        DetailedTestingOps::flagLocationsDriver<<<(numberOfAgents - 1) / 256 + 1, 256>>>(a, numberOfAgents);
        cudaDeviceSynchronize();
#endif
        //
        // Step 2 - do the testing
        //
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_OMP
#pragma omp parallel for
        for (unsigned i = 0; i < numberOfAgents; i++) { DetailedTestingOps::doTesting(i, a); }
#elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
        DetailedTestingOps::doTestingDriver<<<(numberOfAgents - 1) / 256 + 1, 256>>>(a, numberOfAgents);
        cudaDeviceSynchronize();
#endif
        //
        // Step 3 - calculate statistics
        //
        unsigned timestamp = simTime.getTimestamp();
        // Count up those who were tested just now
        unsigned tests = thrust::count(lastTest.begin(), lastTest.end(), timestamp);
        // TODO: count up tests performed in movementPolicy
        //...
        // Count up those who have just been diagnosed because of this testing policy
        unsigned positive1 = thrust::count_if(agentStats.begin(), agentStats.end(), [timestamp] HD(const AgentStats& s) {
            return s.diagnosedTimestamp == timestamp;
        });
        // Count up those who were diagnosed yesterday, because of a doctor/hospital visit (in movementPolicy)
        unsigned positive2 =
            thrust::count_if(agentStats.begin(), agentStats.end(), [timestamp, timeStep] HD(const AgentStats& s) {
                return s.diagnosedTimestamp < timestamp && s.diagnosedTimestamp > timestamp - 24 * 60 / timeStep;
            });
        stats = thrust::make_tuple(tests, positive1, positive2);
    }
};
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two struct timeval values.
 *
 * Returns 1 if the difference is negative (X earlier than Y), otherwise 0.
 * Fix: the original (glibc-manual style) implementation performed the carry
 * by mutating the caller's *y; this version works on a local copy, so
 * neither input is modified. The computed result and return value are
 * unchanged. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  struct timeval yy = *y; /* local copy: do not clobber the caller's value */

  /* Perform the carry for the later subtraction by updating yy. */
  if (x->tv_usec < yy.tv_usec)
  {
    int nsec = (yy.tv_usec - x->tv_usec) / 1000000 + 1;
    yy.tv_usec -= 1000000 * nsec;
    yy.tv_sec += nsec;
  }
  if (x->tv_usec - yy.tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - yy.tv_usec) / 1000000;
    yy.tv_usec += 1000000 * nsec;
    yy.tv_sec -= nsec;
  }

  /* Compute the time remaining to wait; tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - yy.tv_sec;
  result->tv_usec = x->tv_usec - yy.tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < yy.tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) {
for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(8*t3+Nx+4,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),32*t4+30),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
pomelo_fmt_plug.c | /*
* POMELO cracker patch for JtR. Hacked together during the Hash Runner 2015
* contest by Dhiru Kholia.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pomelo;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pomelo);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 512 // XXX
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "pomelo"
#define FORMAT_NAME ""
#define FORMAT_TAG "$pomelo$"
#define TAG_LENGTH sizeof(FORMAT_TAG) - 1
#if __SSE2__
#define ALGORITHM_NAME "POMELO 128/128 SSE2 1x"
#elif !defined(USE_GCC_ASM_IA32) && defined(USE_GCC_ASM_X64)
#define ALGORITHM_NAME "POMELO 64/64"
#else
#define ALGORITHM_NAME "POMELO 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests pomelo_tests[] = {
{"$pomelo$2$3$hash runner 2015$8333ad83e46e425872c5545741d6da105cd31ad58926e437d32247e59b26703e", "HashRunner2014"},
{"$pomelo$2$3$mysalt$b5bebcd9820de6a58dba52abf76aaf6eed4c5c672dbda64e69e3e3cbcc401314", "password"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
unsigned char salt[64];
unsigned int saltlen;
unsigned int t_cost;
unsigned int m_cost;
} *cur_salt;
/* Format init hook: size the key/hash buffers (scaled for OpenMP) and
 * allocate them once. */
static void init(struct fmt_main *self)
{
	/* Scale keys-per-crypt so each OpenMP thread gets OMP_SCALE keys. */
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* Allocate only on first call; init may be invoked more than once. */
	if (!saved_key) {
		saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
		crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
	}
}
/* Format teardown hook: release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate ciphertext shape: $pomelo$<t_cost>$<m_cost>$<salt>$<64 hex chars>.
 * Returns 1 if the string is well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;
	char Buf[256];
	if (strncmp(p, FORMAT_TAG, TAG_LENGTH))
		return 0;
	p += TAG_LENGTH;
	/* Tokenize a bounded copy so the caller's ciphertext is untouched. */
	strnzcpy(Buf, p, sizeof(Buf));
	p = strtokm(Buf, "$");
	if (!p || !isdec(p))			/* t_cost: decimal */
		return 0;
	p = strtokm(NULL, "$");
	if (!p || !isdec(p))			/* m_cost: decimal */
		return 0;
	p = strtokm(NULL, "$");
	if (!p || strlen(p) >= sizeof(cur_salt->salt))	/* salt must fit */
		return 0;
	p = strtokm(NULL, "$");
	if (!p || strlen(p) != CIPHERTEXT_LENGTH)	/* hash: exactly 64 chars */
		return 0;
	/* Every hash character must be a valid hex digit (lowercase table). */
	while(*p)
		if (atoi16l[ARCH_INDEX(*p++)]==0x7f)
			return 0;
	return 1;
}
/* Parse t_cost, m_cost and the salt out of a (pre-validated) ciphertext.
 * Returns a pointer to a static salt struct; the core copies SALT_SIZE bytes. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p, *q;
	memset(&cs, 0, sizeof(cs));
	p = ciphertext + TAG_LENGTH;
	cs.t_cost = atoi(p);
	p = strchr(p, '$') + 1;
	cs.m_cost = atoi(p);
	p = strchr(p, '$') + 1;
	q = strchr(p, '$');
	cs.saltlen = q - p;
	/* cs.salt stays NUL-terminated: cs was zeroed and valid() guarantees
	 * saltlen < sizeof(cs.salt). */
	strncpy((char*)cs.salt, p, cs.saltlen);
	return (void *)&cs;
}
/* Decode the trailing 64 hex digits of a ciphertext into the 32-byte
 * binary hash. Returns a pointer to a static (lazily allocated) buffer. */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	int i;
	char *p = strrchr(ciphertext, '$') + 1;
	if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
	memset(out, 0, BINARY_SIZE);
	for (i = 0; i < BINARY_SIZE; i++) {
		/* Two hex digits per output byte. */
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
/* Hash-table lookup helpers: successively wider slices of the first 32 bits
 * of the computed hash (masks supplied by the JtR core). */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Select the salt used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
int PHS_pomelo(void *out, size_t outlen, const void *in, size_t inlen, const void *salt, size_t saltlen, unsigned int t_cost, unsigned int m_cost);
/* Hash all 'count' queued keys with the current salt.
 * Without OpenMP the for-loop is compiled out and the braced body runs once
 * with index == 0 — which is correct, because MAX_KEYS_PER_CRYPT is 1 and
 * only scaled up in init() under _OPENMP. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		PHS_pomelo((unsigned char *)crypt_out[index], 32, saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, cur_salt->t_cost, cur_salt->m_cost);
	}
	return count;
}
/* Quick scan: does any computed hash match the first ARCH_SIZE bytes of the
 * binary? Without OpenMP, count is always 1 (see init/MAX_KEYS_PER_CRYPT),
 * so checking index 0 alone suffices and the loop is compiled out. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full comparison of one candidate's hash against the binary. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_one already compares the full BINARY_SIZE, so nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void pomelo_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate password for a given index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with the JtR core: parameters block followed
 * by the method table (field order is fixed by struct fmt_main). */
struct fmt_main fmt_pomelo = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		pomelo_tests
	}, {
		/* Method table: lifecycle, parsing, hashing and comparison hooks. */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		pomelo_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
CalcFolderSize.c | #include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>
int isFile(char *path);
int isDir(char *path);
long long fileSize(char *path);
void concatPath(char *dest, char *p, char *fn);
long long newDir(char *path);
long long sum = 4096; //origin folder size
/* Entry point: expects exactly one argument (the directory to measure).
 * A single thread starts the traversal; subdirectories are explored by
 * OpenMP tasks spawned in newDir(). */
int main(int argc, char *argv[])
{
    if (argc != 2)
    {
        fprintf(stderr, "Directory argments neeed\n");
        return EXIT_FAILURE;
    }
#pragma omp parallel
    {
#pragma omp single
        {
            printf("Threads %d\n", omp_get_num_threads());
            newDir(argv[1]);
        }
    }
    /* Implicit barrier at the end of the parallel region: all tasks have
     * finished before the total is printed. */
    printf("Total %lld bytes\n", sum);
}
/* Traverse one directory: sum the sizes of its direct entries into the
 * global 'sum' (atomically) and spawn an OpenMP task for each subdirectory.
 * Returns the local (non-recursive) byte count.
 * Fixes: the original called readdir(NULL) after a failed opendir, and was
 * declared 'long long' but returned no value (undefined behavior if used). */
long long newDir(char *path)
{
    long long localsum = 0;
    DIR *dir;
    struct dirent *dirzeiger;

    if ((dir = opendir(path)) == NULL)
    {
        fprintf(stderr, "Fehler bei opendir ...\n");
        return 0; /* fix: do not iterate a NULL directory stream */
    }
    while ((dirzeiger = readdir(dir)) != NULL)
    { // Runs all files and directories in a folder - exculded subfolders
        if (strcmp((*dirzeiger).d_name, ".") == 0 || strcmp((*dirzeiger).d_name, "..") == 0) //skip backsteps
            continue;
        char nextPath[1024];
        concatPath(nextPath, path, (*dirzeiger).d_name);
        if (isDir(nextPath))
#pragma omp task
        {
            newDir(nextPath); // every directory gets his own-new Task
        }
        localsum += fileSize(nextPath); // add up files sizes inside a Task(folder), - exculded subfolders
    }
#pragma omp atomic
    sum += localsum;
    if (closedir(dir) == -1)
        printf("Fehler beim Schließen von %s\n", path);
    return localsum; /* fix: function promised a long long result */
}
/* Return 1 if 'path' names a regular file (symlinks not followed), else 0.
 * Fix: on lstat failure the original read an uninitialized struct stat
 * (undefined behavior); now it reports the error and returns 0. */
int isFile(char *path)
{
    struct stat sb;
    if (lstat(path, &sb) == -1)
    {
        perror("stat");
        return 0;
    }
    if ((sb.st_mode & S_IFMT) == S_IFREG)
        return 1;
    return 0;
}
/* Return 1 if 'path' names a directory (symlinks not followed), else 0.
 * Fix: on lstat failure the original read an uninitialized struct stat
 * (undefined behavior); now it reports the error and returns 0. */
int isDir(char *path)
{
    struct stat sb;
    if (lstat(path, &sb) == -1)
    {
        perror("stat");
        return 0;
    }
    if ((sb.st_mode & S_IFMT) == S_IFDIR)
        return 1;
    return 0;
}
/* Return the size of 'path' in bytes (symlinks not followed).
 * Fix: on lstat failure the original returned the st_size field of an
 * uninitialized struct stat (undefined behavior); now it reports the error
 * and returns 0 so totals stay sane. */
long long fileSize(char *path)
{
    struct stat sb;
    if (lstat(path, &sb) == -1)
    {
        perror("stat");
        return 0;
    }
    return (long long)sb.st_size;
}
/* Join a directory path and a file name into dest as "<p>/<fn>".
 * dest must be large enough for both parts plus separator and terminator. */
void concatPath(char *dest, char *p, char *fn)
{
    sprintf(dest, "%s/%s", p, fn);
}
|
GB_binop__pair_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pair_uint64
// A.*B function (eWiseMult): GB_AemultB__pair_uint64
// A*D function (colscale): GB_AxD__pair_uint64
// D*A function (rowscale): GB_DxB__pair_uint64
// C+=B function (dense accum): GB_Cdense_accumB__pair_uint64
// C+=b function (dense accum): GB_Cdense_accumb__pair_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_uint64
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = 1
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = 1 ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_UINT64 || GxB_NO_PAIR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense, using the PAIR operator (cij = 1).
// The loop body is supplied by the included template; GB_DISABLE compiles
// this kernel out when the operator/type combination is disabled.
GrB_Info GB_Cdense_ewise3_noaccum__pair_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, sliced by the
// precomputed ek-slice arrays for parallel execution.
GrB_Info GB_Cdense_accumB__pair_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB_Cdense_accumb__pair_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above always returns); kept as emitted by the
    // code generator — this file is auto-generated, do not edit by hand.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale with a diagonal matrix D, writing directly into C->x.
GrB_Info GB_AxD__pair_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale with a diagonal matrix D, writing directly into C->x.
GrB_Info GB_DxB__pair_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of patterns, using the
// task list produced by the add-phase planner.
GrB_Info GB_AaddB__pair_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of patterns,
// using the task list produced by the emult-phase planner.
GrB_Info GB_AemultB__pair_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
// C = op (A', y): transpose A and apply a binary operator with the scalar y
// bound to the second argument (cij = f(aij, y)).  NOTE: the enclosing
// "#if 0" compiles this entire region out; "(none)" is the generated
// placeholder name for a kernel that is not built in this configuration.
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this kernel was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// unpack the scalar y (no typecast; both sides are uint64_t)
uint64_t y = (*((const uint64_t *) y_input)) ;
// the transpose loop is textually included below; it applies the GB_CAST_OP
// macro (defined just above this function) at each entry of A'
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/magick.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/option.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
/*
Private state of a resize filter: the weighting function, an optional
windowing function applied over it, their support sizes, and precomputed
shape constants.  See AcquireResizeFilter() for how the fields are filled.
*/
struct _ResizeFilter
{
MagickRealType
(*filter)(const MagickRealType,const ResizeFilter *), /* weighting function */
(*window)(const MagickRealType,const ResizeFilter *), /* windowing function */
support, /* filter region of support - the filter support limit */
window_support, /* window support, usually equal to support (expert only) */
scale, /* dimension scaling to fit window support (usually 1.0) */
blur, /* x-scale (blur-sharpen) */
coefficient[7]; /* cubic coefficients for BC-cubic filters; also holds the
Gaussian exponent scale (coefficient[1]) and the
Kaiser beta/normalization (coefficient[0..1]) */
ResizeWeightingFunctionType
filterWeightingType, /* identifies the function behind (*filter) */
windowWeightingType; /* identifies the function behind (*window) */
size_t
signature; /* structure validity marker -- presumably set to
MagickCoreSignature on creation; verify in AcquireResizeFilter() */
};
/*
Forward declarations.
*/
static MagickRealType
I0(MagickRealType x),
BesselOrderOne(MagickRealType),
Sinc(const MagickRealType, const ResizeFilter *),
SincFast(const MagickRealType, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static MagickRealtype *FilterName(const MagickRealType x,
% const MagickRealType support)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static MagickRealType Blackman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman window: 0.42+0.5*cos(pi*x)+0.08*cos(2*pi*x), folded into a
    single trig call via the identity cos(2t) = 2*cos(t)*cos(t)-1 (credit:
    Chantal Racette and Nicolas Robidoux), leaving five flops.
  */
  MagickRealType c;

  magick_unreferenced(resize_filter);
  c=cos((double) (MagickPI*x));
  return(0.34+c*(0.5+c*0.16));
}
static MagickRealType Bohman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman window: (1-x)*cos(pi*x)+sin(pi*x)/pi, computed with one trig call
    and one sqrt (credit: Nicolas Robidoux).  The support of Bohman is 1.0,
    so sin(pi*x) >= 0 over it and can be recovered as sqrt(1-cos^2(pi*x)).
  */
  double c,
    s;

  magick_unreferenced(resize_filter);
  c=cos((double) (MagickPI*x));
  s=sqrt(1.0-c*c);
  return((MagickRealType) ((1.0-x)*c+(1.0/MagickPI)*s));
}
static MagickRealType Box(const MagickRealType magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Box filter: constant weight everywhere it is evaluated.  Deliberately
    NOT clipped by the support size: resize point sampling evaluates this
    beyond its nominal 0.0 support, and clipping here would break that.
  */
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  return(1.0);
}
static MagickRealType Cosine(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Cosine window function:
cos((pi/2)*x).
*/
magick_unreferenced(resize_filter);
return((MagickRealType)cos((double) (MagickPI2*x)));
}
static MagickRealType CubicBC(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Two-piece cubic determined by Mitchell-Netravali B,C values:
      Mitchell-Netravali  B = 1/3  C = 1/3  "balanced" cubic spline
      Catmull-Rom         B = 0    C = 1/2  interpolatory, exact on linears
      Spline              B = 1    C = 0    B-spline Gaussian approximation
      Hermite             B = 0    C = 0    B-spline interpolator
    Reference: Mitchell and Netravali, "Reconstruction Filters in Computer
    Graphics", Computer Graphics, Volume 22, Number 4, August 1988.
    coefficient[0..2] hold the near-piece polynomial (0 <= x < 1, with the
    linear term identically zero) and coefficient[3..6] the far piece
    (1 <= x < 2); both are precomputed from B,C so the function is
    continuous in value and slope.
  */
  const MagickRealType *coeff=resize_filter->coefficient;

  if (x < 1.0)
    return(coeff[0]+x*(x*(coeff[1]+x*coeff[2])));
  if (x < 2.0)
    return(coeff[3]+x*(coeff[4]+x*(coeff[5]+x*coeff[6])));
  return(0.0);
}
static MagickRealType Gaussian(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Gaussian filter: exp(-(x^2)/(2*sigma^2)), sigma = 1/2 by default (or as
    the user specified via "filter:sigma").  Only coefficient[1], the
    precomputed 1/(2*sigma^2), is needed here; the sigma and normalization
    values stored alongside it are informative only, since a weighting
    function need not integrate to one.  Keeping sigma independent of the
    blur/support settings lets small-sigma Gaussians be used without the
    filter "missing" pixels as the support shrinks.
  */
  const MagickRealType exponent_scale=resize_filter->coefficient[1];

  return(exp((double) (-exponent_scale*x*x)));
}
static MagickRealType Hanning(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann window (raised cosine): 0.5+0.5*cos(pi*x).
  */
  MagickRealType c;

  magick_unreferenced(resize_filter);
  c=cos((double) (MagickPI*x));
  return(0.5+0.5*c);
}
static MagickRealType Hamming(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming window (offset raised cosine): 0.54+0.46*cos(pi*x).
  */
  MagickRealType c;

  magick_unreferenced(resize_filter);
  c=cos((double) (MagickPI*x));
  return(0.54+0.46*c);
}
static MagickRealType Jinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Jinc function: J1(pi*x)/x, the 2D radial analogue of sinc, with limit
    pi/2 at x = 0.  The original "zoom" program by Paul Heckbert called this
    "Bessel", but "Jinc" is the accurate name.
    See Pratt, "Digital Image Processing", p.97, and
    http://mathworld.wolfram.com/JincFunction.html
  */
  magick_unreferenced(resize_filter);
  if (x != 0.0)
    return(BesselOrderOne((MagickRealType) MagickPI*x)/x);
  return((MagickRealType) (0.5*MagickPI));
}
static MagickRealType Kaiser(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Kaiser (Bessel) window: I0(beta*sqrt(1-x^2)), scaled by a normalization
    factor.  coefficient[0] is beta, a free parameter typically between 5
    and 8 (default 6.5) and often expressed as alpha*PI; coefficient[1] is
    the normalization, not strictly required but kept so the window does not
    tower over other windowing functions at x = 0.
  */
  const MagickRealType beta=resize_filter->coefficient[0];
  const MagickRealType normalization=resize_filter->coefficient[1];

  return(normalization*I0(beta*sqrt((double) (1.0-x*x))));
}
static MagickRealType Lagrange(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Lagrange piecewise polynomial fit of sinc.  The number of pieces is
    twice the window support (so a support of 2.0 yields a lagrange-4,
    i.e. a piecewise cubic); "piece" selects which polynomial piece x
    falls in.
    Reference: Survey: Interpolation Methods, IEEE Transactions on Medical
    Imaging, Vol 18, No 11, November 1999, p1049-1075, Equation 27, p1064.
  */
  MagickRealType
    product;

  ssize_t
    k,
    piece,
    pieces;

  if (x > resize_filter->support)
    return(0.0);  /* beyond the filter's support window */
  pieces=(ssize_t) (2.0*resize_filter->window_support);
  piece=(ssize_t) (resize_filter->window_support+x);
  product=1.0f;
  for (k=0; k < pieces; k++)
  {
    if (k == piece)
      continue;
    product*=(piece-k-x)/(piece-k);
  }
  return(product);
}
static MagickRealType Quadratic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    2nd order (quadratic) B-spline approximation of a Gaussian.
  */
  magick_unreferenced(resize_filter);
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    {
      const MagickRealType t=x-1.5;
      return(0.5*t*t);
    }
  return(0.0);
}
static MagickRealType Sinc(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Scaled sinc(x) function using a trig call:
sinc(x) == sin(pi x)/(pi x).
*/
magick_unreferenced(resize_filter);
if (x != 0.0)
{
const MagickRealType alpha=(MagickRealType) (MagickPI*x);
return(sin((double) alpha)/alpha);
}
return((MagickRealType) 1.0);
}
static MagickRealType SincFast(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Approximations of the sinc function sin(pi x)/(pi x) over the interval
[-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
from the Natural Sciences and Engineering Research Council of Canada.
Although the approximations are polynomials (for low order of
approximation) and quotients of polynomials (for higher order of
approximation) and consequently are similar in form to Taylor polynomials /
Pade approximants, the approximations are computed with a completely
different technique.
Summary: These approximations are "the best" in terms of bang (accuracy)
for the buck (flops). More specifically: Among the polynomial quotients
that can be computed using a fixed number of flops (with a given "+ - * /
budget"), the chosen polynomial quotient is the one closest to the
approximated function with respect to maximum absolute relative error over
the given interval.
The Remez algorithm, as implemented in the boost library's minimax package,
is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html
If outside of the interval of approximation, use the standard trig formula.
*/
magick_unreferenced(resize_filter);
if (x > 4.0)
{
/* Outside the fitted interval: fall back to the exact sin(pi x)/(pi x). */
const MagickRealType alpha=(MagickRealType) (MagickPI*x);
return(sin((double) alpha)/alpha);
}
{
/*
The approximations only depend on x^2 (sinc is an even function).
The (xx-1)(xx-4)(xx-9)(xx-16) factor below pins the exact zeros of sinc at
x = 1, 2, 3, 4; the Horner-evaluated polynomial p (or rational p/q in the
highest-precision branch) supplies the remaining shape.  Which branch is
compiled is selected by the pixel quantum depth: deeper quanta require a
smaller maximum relative error.
*/
const MagickRealType xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
/*
Maximum absolute relative error 6.3e-6 < 1/2^17.
*/
const double c0 = 0.173610016489197553621906385078711564924e-2L;
const double c1 = -0.384186115075660162081071290162149315834e-3L;
const double c2 = 0.393684603287860108352720146121813443561e-4L;
const double c3 = -0.248947210682259168029030370205389323899e-5L;
const double c4 = 0.107791837839662283066379987646635416692e-6L;
const double c5 = -0.324874073895735800961260474028013982211e-8L;
const double c6 = 0.628155216606695311524920882748052490116e-10L;
const double c7 = -0.586110644039348333520104379959307242711e-12L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
/*
Max. abs. rel. error 2.2e-8 < 1/2^25.
*/
const double c0 = 0.173611107357320220183368594093166520811e-2L;
const double c1 = -0.384240921114946632192116762889211361285e-3L;
const double c2 = 0.394201182359318128221229891724947048771e-4L;
const double c3 = -0.250963301609117217660068889165550534856e-5L;
const double c4 = 0.111902032818095784414237782071368805120e-6L;
const double c5 = -0.372895101408779549368465614321137048875e-8L;
const double c6 = 0.957694196677572570319816780188718518330e-10L;
const double c7 = -0.187208577776590710853865174371617338991e-11L;
const double c8 = 0.253524321426864752676094495396308636823e-13L;
const double c9 = -0.177084805010701112639035485248501049364e-15L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
/*
Max. abs. rel. error 1.2e-12 < 1/2^39.
*/
const double c0 = 0.173611111110910715186413700076827593074e-2L;
const double c1 = -0.289105544717893415815859968653611245425e-3L;
const double c2 = 0.206952161241815727624413291940849294025e-4L;
const double c3 = -0.834446180169727178193268528095341741698e-6L;
const double c4 = 0.207010104171026718629622453275917944941e-7L;
const double c5 = -0.319724784938507108101517564300855542655e-9L;
const double c6 = 0.288101675249103266147006509214934493930e-11L;
const double c7 = -0.118218971804934245819960233886876537953e-13L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
const double d0 = 1.0L;
const double d1 = 0.547981619622284827495856984100563583948e-1L;
const double d2 = 0.134226268835357312626304688047086921806e-2L;
const double d3 = 0.178994697503371051002463656833597608689e-4L;
const double d4 = 0.114633394140438168641246022557689759090e-6L;
const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
return((MagickRealType) ((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p));
#endif
}
}
static MagickRealType Triangle(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
a Bartlett 2D Cone filter. Also used as a Bartlett Windowing function
for Sinc().
*/
magick_unreferenced(resize_filter);
if (x < 1.0)
return(1.0-x);
return(0.0);
}
static MagickRealType Welsh(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Welsh parabolic windowing filter.
*/
magick_unreferenced(resize_filter);
if (x < 1.0)
return(1.0-x*x);
return(0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
% The special a 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for images distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RodidouxSharp is a slightly sharper version of Rodidoux, some believe it
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly Mitchell falls roughly between Rodidoux and
% RodidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisible. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
%        causes the windowing (or self-windowing Lagrange filter) to act as if
%        the support window is much larger than what is actually supplied
% to the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
%        If only one of these is given it is assumed to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 10 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterTypes filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResizeFilter *AcquireResizeFilter(const Image *image,
const FilterTypes filter,const MagickRealType blur,
const MagickBooleanType cylindrical,ExceptionInfo *exception)
{
const char
*artifact;
FilterTypes
filter_type,
window_type;
MagickRealType
B,
C,
value;
register ResizeFilter
*resize_filter;
/*
Table Mapping given Filter, into Weighting and Windowing functions.
A 'Box' windowing function means its a simble non-windowed filter.
An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
"cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
specifically requested by the user.
WARNING: The order of this table must match the order of the FilterTypes
enumeration specified in "resample.h", or the filter names will not match
the filter being setup.
You can check filter setups with the "filter:verbose" expert setting.
*/
static struct
{
FilterTypes
filter,
window;
} const mapping[SentinelFilter] =
{
{ UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
{ PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
{ BoxFilter, BoxFilter }, /* Box averaging filter */
{ TriangleFilter, BoxFilter }, /* Linear interpolation filter */
{ HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
{ SincFastFilter, HanningFilter }, /* Hanning -- cosine-sinc */
{ SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
{ SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
{ GaussianFilter, BoxFilter }, /* Gaussian blur filter */
{ QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
{ CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
{ CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
{ MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
{ JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
{ SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
{ SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
{ SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
{ LanczosFilter, WelshFilter }, /* Welch -- parabolic (3 lobe) */
{ SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
{ SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
{ SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
{ LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
{ LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
{ LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
{ Lanczos2Filter, Lanczos2Filter }, /* | special handling */
{ Lanczos2SharpFilter, Lanczos2SharpFilter },
{ RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
{ RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
{ LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
{ SplineFilter, BoxFilter }, /* Spline Cubic Filter */
{ LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
};
/*
Table mapping the filter/window from the above table to an actual function.
The default support size for that filter as a weighting function, the range
to scale with to use that function as a sinc windowing function, (typ 1.0).
Note that the filter_type -> function is 1 to 1 except for Sinc(),
SincFast(), and CubicBC() functions, which may have multiple filter to
function associations.
See "filter:verbose" handling below for the function -> filter mapping.
*/
static struct
{
MagickRealType
(*function)(const MagickRealType,const ResizeFilter*);
double
support, /* Default lobes/support size of the weighting filter. */
scale, /* Support when function used as a windowing function
Typically equal to the location of the first zero crossing. */
B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
ResizeWeightingFunctionType weightingFunctionType;
} const filters[SentinelFilter] =
{
/* .--- support window (if used as a Weighting Function)
| .--- first crossing (if used as a Windowing Function)
| | .--- B value for Cubic Function
| | | .---- C value for Cubic Function
| | | | */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */
{ Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */
{ CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */
{ Hanning, 1.0, 1.0, 0.0, 0.0, HanningWeightingFunction }, /* Hann, cosine window */
{ Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */
{ Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */
{ Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */
{ Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */
{ CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */
{ CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */
{ Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
{ Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */
{ SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
{ Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */
{ Welsh, 1.0, 1.0, 0.0, 0.0, WelshWeightingFunction }, /* Welsh (parabolic window) */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */
{ Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */
{ Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */
/* Robidoux: Keys cubic close to Lanczos2D sharpened */
{ CubicBC, 2.0, 1.1685777620836932,
0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
/* RobidouxSharp: Sharper version of Robidoux */
{ CubicBC, 2.0, 1.105822933719019,
0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
{ Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */
};
/*
The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
function being used as a filter. It is used by the "filter:lobes" expert
setting and for 'lobes' for Jinc functions in the previous table. This way
users do not have to deal with the highly irrational lobe sizes of the Jinc
filter.
Values taken from
http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
using Jv-function with v=1, then dividing by PI.
*/
static double
jinc_zeros[16] =
{
1.2196698912665045,
2.2331305943815286,
3.2383154841662362,
4.2410628637960699,
5.2427643768701817,
6.2439216898644877,
7.2447598687199570,
8.2453949139520427,
9.2458926849494673,
10.246293348754916,
11.246622794877883,
12.246898461138105,
13.247132522181061,
14.247333735806849,
15.247508563037300,
16.247661874700962
};
/*
Allocate resize filter.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(UndefinedFilter < filter && filter < SentinelFilter);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
(void) exception;
if (resize_filter == (ResizeFilter *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter));
/*
Defaults for the requested filter.
*/
filter_type=mapping[filter].filter;
window_type=mapping[filter].window;
resize_filter->blur = blur; /* function argument blur factor (1.0) */
/* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
if (cylindrical != MagickFalse && filter_type == SincFastFilter
&& filter != SincFastFilter )
filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
/* Expert filter setting override */
artifact=GetImageArtifact(image,"filter:filter");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{ /* Raw filter request - no window function. */
filter_type=(FilterTypes) option;
window_type=BoxFilter;
}
/* Filter override with a specific window function. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
window_type=(FilterTypes) option;
}
}
else
{
/* Window specified, but no filter function? Assume Sinc/Jinc. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{
filter_type=cylindrical != MagickFalse ?
JincFilter : SincFastFilter;
window_type=(FilterTypes) option;
}
}
}
/* Assign the real functions to use for the filters selected. */
resize_filter->filter=filters[filter_type].function;
resize_filter->support=filters[filter_type].support;
resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
resize_filter->window=filters[window_type].function;
resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
resize_filter->scale=filters[window_type].scale;
resize_filter->signature=MagickCoreSignature;
/* Filter Modifications for orthogonal/cylindrical usage */
if (cylindrical != MagickFalse)
switch (filter_type)
{
case BoxFilter:
/* Support for Cylindrical Box should be sqrt(2)/2 */
resize_filter->support=(MagickRealType) MagickSQ1_2;
break;
case LanczosFilter:
case LanczosSharpFilter:
case Lanczos2Filter:
case Lanczos2SharpFilter:
case LanczosRadiusFilter:
resize_filter->filter=filters[JincFilter].function;
resize_filter->window=filters[JincFilter].function;
resize_filter->scale=filters[JincFilter].scale;
/* number of lobes (support window size) remain unchanged */
break;
default:
break;
}
/* Global Sharpening (regardless of orthoginal/cylindrical) */
switch (filter_type)
{
case LanczosSharpFilter:
resize_filter->blur *= (MagickRealType) 0.9812505644269356;
break;
case Lanczos2SharpFilter:
resize_filter->blur *= (MagickRealType) 0.9549963639785485;
break;
/* case LanczosRadius: blur adjust is done after lobes */
default:
break;
}
/*
Expert Option Modifications.
*/
/* User Gaussian Sigma Override - no support change */
if ((resize_filter->filter == Gaussian) ||
(resize_filter->window == Gaussian) ) {
value=0.5; /* guassian sigma default, half pixel */
artifact=GetImageArtifact(image,"filter:sigma");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
/* Define coefficents for Gaussian */
resize_filter->coefficient[0]=value; /* note sigma too */
resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
/* normalization - not actually needed or used! */
if ( value > 0.5 )
resize_filter->support *= value/0.5; /* increase support */
}
/* User Kaiser Alpha Override - no support change */
if ((resize_filter->filter == Kaiser) ||
(resize_filter->window == Kaiser) ) {
value=6.5; /* default beta value for Kaiser bessel windowing function */
artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-beta");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-alpha");
if (artifact != (const char *) NULL)
value=(MagickRealType) (StringToDouble(artifact,(char **) NULL)*MagickPI);
/* Define coefficents for Kaiser Windowing Function */
resize_filter->coefficient[0]=value; /* alpha */
resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */
}
/* Support Overrides */
artifact=GetImageArtifact(image,"filter:lobes");
if (artifact != (const char *) NULL)
{
ssize_t
lobes;
lobes=(ssize_t) StringToLong(artifact);
if (lobes < 1)
lobes=1;
resize_filter->support=(MagickRealType) lobes;
}
/* Convert a Jinc function lobes value to a real support value */
if (resize_filter->filter == Jinc)
{
if (resize_filter->support > 16)
resize_filter->support=jinc_zeros[15]; /* largest entry in table */
else
resize_filter->support=jinc_zeros[((long)resize_filter->support)-1];
/* blur this filter so support is a integer value (lobes dependant) */
if (filter_type == LanczosRadiusFilter)
{
resize_filter->blur *= floor(resize_filter->support)/
resize_filter->support;
}
}
/* Expert Blur Override */
artifact=GetImageArtifact(image,"filter:blur");
if (artifact != (const char *) NULL)
resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
if (resize_filter->blur < MagickEpsilon)
resize_filter->blur=(MagickRealType) MagickEpsilon;
/* Expert override of the support setting */
artifact=GetImageArtifact(image,"filter:support");
if (artifact != (const char *) NULL)
resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Scale windowing function separately to the support 'clipping'
window that calling operator is planning to actually use. (Expert
override)
*/
resize_filter->window_support=resize_filter->support; /* default */
artifact=GetImageArtifact(image,"filter:win-support");
if (artifact != (const char *) NULL)
resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Adjust window function scaling to match windowing support for
weighting function. This avoids a division on every filter call.
*/
resize_filter->scale/=resize_filter->window_support;
/*
* Set Cubic Spline B,C values, calculate Cubic coefficients.
*/
B=0.0;
C=0.0;
if ((resize_filter->filter == CubicBC) ||
(resize_filter->window == CubicBC) )
{
B=filters[filter_type].B;
C=filters[filter_type].C;
if (filters[window_type].function == CubicBC)
{
B=filters[window_type].B;
C=filters[window_type].C;
}
artifact=GetImageArtifact(image,"filter:b");
if (artifact != (const char *) NULL)
{
B=StringToDouble(artifact,(char **) NULL);
C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
artifact=GetImageArtifact(image,"filter:c"); /* user C override */
if (artifact != (const char *) NULL)
C=StringToDouble(artifact,(char **) NULL);
}
else
{
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL)
{
C=StringToDouble(artifact,(char **) NULL);
B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
}
}
/* Convert B,C values into Cubic Coefficents. See CubicBC(). */
{
const double twoB = B+B;
resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
resize_filter->coefficient[1]=-3.0+twoB+C;
resize_filter->coefficient[2]=2.0-1.5*B-C;
resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
resize_filter->coefficient[4]=-8.0*C-twoB;
resize_filter->coefficient[5]=B+5.0*C;
resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
}
}
/*
Expert Option Request for verbose details of the resulting filter.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp master
{
#endif
artifact=GetImageArtifact(image,"filter:verbose");
if (IsMagickTrue(artifact) != MagickFalse)
{
double
support,
x;
/*
Set the weighting function properly when the weighting
function may not exactly match the filter of the same name.
EG: a Point filter is really uses a Box weighting function
with a different support than is typically used.
*/
if (resize_filter->filter == Box) filter_type=BoxFilter;
if (resize_filter->filter == Sinc) filter_type=SincFilter;
if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
if (resize_filter->filter == Jinc) filter_type=JincFilter;
if (resize_filter->filter == CubicBC) filter_type=CubicFilter;
if (resize_filter->window == Box) window_type=BoxFilter;
if (resize_filter->window == Sinc) window_type=SincFilter;
if (resize_filter->window == SincFast) window_type=SincFastFilter;
if (resize_filter->window == Jinc) window_type=JincFilter;
if (resize_filter->window == CubicBC) window_type=CubicFilter;
/*
Report Filter Details.
*/
support=GetResizeFilterSupport(resize_filter); /* practical_support */
(void) FormatLocaleFile(stdout,"# Resampling Filter (for graphing)\n#\n");
(void) FormatLocaleFile(stdout,"# filter = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,filter_type));
(void) FormatLocaleFile(stdout,"# window = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,window_type));
(void) FormatLocaleFile(stdout,"# support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->support);
(void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->window_support);
(void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
GetMagickPrecision(), (double)resize_filter->blur);
if ( filter_type == GaussianFilter || window_type == GaussianFilter )
(void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
GetMagickPrecision(), (double)resize_filter->coefficient[0]);
if ( filter_type == KaiserFilter || window_type == KaiserFilter )
(void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
GetMagickPrecision(),
(double)resize_filter->coefficient[0]);
(void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
GetMagickPrecision(), (double)support);
if ( filter_type == CubicFilter || window_type == CubicFilter )
(void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
(void) FormatLocaleFile(stdout,"\n");
/*
Output values of resulting filter graph -- for graphing
filter result.
*/
for (x=0.0; x <= support; x+=0.01f)
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,GetMagickPrecision(),
(double) GetResizeFilterWeight(resize_filter,x));
/* A final value so gnuplot can graph the 'stop' properly. */
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
GetMagickPrecision(),0.0);
}
/* Output the above once only for each image - remove setting */
(void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
}
#endif
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% This is shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing on images a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Shortcut for a fast interpolative resize: delegate to
    InterpolativeResizeImage() with mesh interpolation.
  */
  Image
    *resized;

  resized=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception);
  return(resized);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order one. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*j1(x);
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% MagickRealType BesselOrderOne(MagickRealType x)
%
% A description of each parameter follows:
%
% o x: MagickRealType value.
%
*/
#undef I0
static MagickRealType I0(MagickRealType x)
{
  /*
    Modified Bessel function of the first kind, order zero, evaluated by
    its power series: I0(x) = sum_{k>=0} ((x*x/4)^k)/(k!*k!).  Terms are
    accumulated until they fall to MagickEpsilon or below.
  */
  MagickRealType
    quarter_square,
    sum,
    term;

  register ssize_t
    k;

  quarter_square=x*x/4.0;
  sum=1.0;
  term=quarter_square;
  for (k=2; term > MagickEpsilon; k++)
  {
    sum+=term;
    term*=quarter_square/((MagickRealType) k*k);
  }
  return(sum);
}
#undef J1
static MagickRealType J1(MagickRealType x)
{
  /*
    Rational polynomial approximation in powers of x*x, used by
    BesselOrderOne() for |x| < 8.0 (which multiplies the result by x).
    Both polynomials are evaluated with Horner's rule; the coefficient
    tables are unchanged.
  */
  MagickRealType
    p,
    q,
    x_squared;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  /* Hoist the loop-invariant x*x out of the Horner recurrence. */
  x_squared=x*x;
  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x_squared+Pone[i];
    q=q*x_squared+Qone[i];
  }
  return(p/q);
}
#undef P1
static MagickRealType P1(MagickRealType x)
{
  /*
    Modulus polynomial for the large-argument (x >= 8.0) expansion of the
    order-one Bessel function in BesselOrderOne(): a rational function
    evaluated in powers of (8/x)^2 with Horner's rule.  Coefficient
    tables are unchanged.
  */
  MagickRealType
    p,
    q,
    z;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  /* Hoist the loop-invariant (8/x)^2 out of the Horner recurrence. */
  z=(8.0/x)*(8.0/x);
  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*z+Pone[i];
    q=q*z+Qone[i];
  }
  return(p/q);
}
#undef Q1
static MagickRealType Q1(MagickRealType x)
{
  /*
    Phase polynomial for the large-argument (x >= 8.0) expansion of the
    order-one Bessel function in BesselOrderOne(): a rational function
    evaluated in powers of (8/x)^2 with Horner's rule.  Coefficient
    tables are unchanged.
  */
  MagickRealType
    p,
    q,
    z;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  /* Hoist the loop-invariant (8/x)^2 out of the Horner recurrence. */
  z=(8.0/x)*(8.0/x);
  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*z+Pone[i];
    q=q*z+Qone[i];
  }
  return(p/q);
}
static MagickRealType BesselOrderOne(MagickRealType x)
{
  /*
    Bessel function of the first kind, order one.  For |x| < 8 the form
    x*J1(|x|) is used; otherwise the asymptotic expansion with the
    modulus/phase polynomials P1() and Q1().  The function is odd, so
    the sign of the argument is restored at the end.
  */
  MagickRealType
    magnitude,
    value;

  if (x == 0.0)
    return(0.0);
  magnitude=x < 0.0 ? -x : x;
  if (magnitude < 8.0)
    return(x*J1(magnitude));
  value=sqrt((double) (2.0/(MagickPI*magnitude)))*(P1(magnitude)*
    (1.0/sqrt(2.0)*(sin((double) magnitude)-cos((double) magnitude)))-
    8.0/magnitude*Q1(magnitude)*(-1.0/sqrt(2.0)*(sin((double) magnitude)+
    cos((double) magnitude))));
  if (x < 0.0)
    value=(-value);
  return(value);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroy the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Release a resize filter.  The signature is invalidated before the
    memory is relinquished so later misuse trips the assertions above.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  resize_filter->signature=(~MagickCoreSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
/* Return the filter's coefficient array (expert weighting-function terms). */
MagickExport MagickRealType *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((MagickRealType *) resize_filter->coefficient);
}

/* Return the filter's blur factor. */
MagickExport MagickRealType GetResizeFilterBlur(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Return the scale applied to the windowing function's argument. */
MagickExport MagickRealType GetResizeFilterScale(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Return the windowing support (may differ from the filter support). */
MagickExport MagickRealType GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Return the identifier of the primary weighting function. */
MagickExport ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Return the identifier of the windowing weighting function. */
MagickExport ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

/* Return the practical support: declared support enlarged by the blur. */
MagickExport MagickRealType GetResizeFilterSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usually lies between zero and the filter's current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const MagickRealType x)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter to evaluate.
%
% o x: the point.
%
*/
MagickExport MagickRealType GetResizeFilterWeight(
  const ResizeFilter *resize_filter,const MagickRealType x)
{
  /*
    Evaluate the resize filter at offset x: the windowing function
    (scaled by the filter's precomputed scale) modulates the weighting
    filter itself.
  */
  MagickRealType
    blurred_offset,
    window_weight;

  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* X offset with blur scaling applied. */
  blurred_offset=fabs((double) x)/resize_filter->blur;
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    window_weight=1.0; /* Point or Box filter -- avoid division by zero */
  else
    window_weight=resize_filter->window(blurred_offset*resize_filter->scale,
      resize_filter);
  return(window_weight*resize_filter->filter(blurred_offset,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const InterpolatePixelMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const InterpolatePixelMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Degenerate target geometry: nothing to produce. */
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Identical geometry: a straight clone is sufficient. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* Source pixels per destination pixel along each axis. */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    PointInfo
      offset;

    register IndexPacket
      *magick_restrict resize_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip remaining work on this thread. */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      continue;
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    GetMagickPixelPacket(image,&pixel);
    /* Map the destination pixel center back into source coordinates. */
    offset.y=((MagickRealType) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      offset.x=((MagickRealType) x+0.5)*scale.x-0.5;
      (void) InterpolateMagickPixelPacket(image,image_view,method,offset.x,
        offset.y,&pixel,exception);
      SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      continue;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,
% const size_t columns,const size_t rows,
% const double delta_x,const double rigidity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"

  CacheView
    *rescale_view;

  const char
    *map;

  guchar
    *packet;

  Image
    *rescale_image;

  int
    x,
    y;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;

  MemoryInfo
    *pixel_info;

  unsigned char
    *pixels;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* Seam carving needs room to work; fall back to a normal resize. */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,image->blur,exception));
  /* Choose a pixel-export map matching the image's channels. */
  map="RGB";
  if (image->matte != MagickFalse)
    map="RGBA";
  if (image->colorspace == CMYKColorspace)
    {
      map="CMYK";
      if (image->matte != MagickFalse)
        map="CMYKA";
    }
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*strlen(map)*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
  status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel,
    pixels,exception);
  if (status == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  carver=lqr_carver_new(pixels,(int) image->columns,(int) image->rows,
    (int) strlen(map));
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  /* NOTE(review): liblqr status codes are not acted upon here; confirm
     whether init/resize failures should raise an exception instead. */
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      /* Fix: release the carver too (previously leaked on this path). */
      lqr_carver_destroy(carver);
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&rescale_image->exception);
      rescale_image=DestroyImage(rescale_image);
      /* Fix: release the carver and pixel buffer (previously leaked). */
      lqr_carver_destroy(carver);
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  GetMagickPixelPacket(rescale_image,&pixel);
  (void) lqr_carver_scan_reset(carver);
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  /* Copy carved pixels, rescaling 8-bit samples to the quantum range. */
  while (lqr_carver_scan(carver,&x,&y,&packet) != 0)
  {
    register IndexPacket
      *magick_restrict rescale_indexes;

    register PixelPacket
      *magick_restrict q;

    q=QueueCacheViewAuthenticPixels(rescale_view,x,y,1,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    rescale_indexes=GetCacheViewAuthenticIndexQueue(rescale_view);
    pixel.red=QuantumRange*(packet[0]/255.0);
    pixel.green=QuantumRange*(packet[1]/255.0);
    pixel.blue=QuantumRange*(packet[2]/255.0);
    if (image->colorspace != CMYKColorspace)
      {
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange-QuantumRange*(packet[3]/255.0);
      }
    else
      {
        pixel.index=QuantumRange*(packet[3]/255.0);
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange-QuantumRange*(packet[4]/255.0);
      }
    SetPixelPacket(rescale_image,&pixel,q,rescale_indexes);
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  /*
    Relinquish resources.
  */
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  /*
    Stub compiled when the liblqr delegate is not built in: validates the
    arguments, records a MissingDelegateError, and returns no image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"
CacheView
*image_view,
*magnify_view;
Image
*magnify_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize magnified image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* The result is exactly twice the input size in each dimension. */
magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
exception);
if (magnify_image == (Image *) NULL)
return((Image *) NULL);
/*
Magnify image.  Each source pixel expands to a 2x2 block selected from its
3x3 neighborhood (an EPX/Scale2x-style rule, judging by the comparisons
below).  Rows are processed in parallel; each iteration writes only its
own two output rows, so no locking of pixel data is needed.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,magnify_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict magnify_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
/* Input row y produces output rows 2*y and 2*y+1; queue both at once. */
q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
magnify_indexes=GetCacheViewAuthenticIndexQueue(magnify_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity[9];
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register PixelPacket
*magick_restrict r;
register ssize_t
i;
/*
Magnify this row of pixels.
*/
/* p is the 3x3 neighborhood centered on (x,y), row-major; p[4] is the
center pixel.  Virtual pixels handle the image border. */
p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (i=0; i < 9; i++)
intensity[i]=GetPixelIntensity(image,p+i);
/* r walks the 2x2 output block: top-left, top-right, bottom-left,
bottom-right.  The (columns-1) hop moves from the end of the top pair
to the start of the bottom pair in the two-row queue. */
r=q;
if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
(fabs(intensity[3]-intensity[5]) < MagickEpsilon))
{
/*
Clone center pixel.
*/
*r=p[4];
r++;
*r=p[4];
r+=(magnify_image->columns-1);
*r=p[4];
r++;
*r=p[4];
}
else
{
/*
Selectively clone pixel.
*/
/* p[1]=above, p[3]=left, p[5]=right, p[7]=below the center. */
if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
*r=p[3];
else
*r=p[4];
r++;
if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
*r=p[5];
else
*r=p[4];
r+=(magnify_image->columns-1);
if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
*r=p[3];
else
*r=p[4];
r++;
if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
*r=p[5];
else
*r=p[4];
}
if (indexes != (const IndexPacket *) NULL)
{
register IndexPacket
*r;
/*
Magnify the colormap indexes.
*/
/* Same selection rule as above, applied to the index channel so the
colormap entries stay in step with the pixels. */
r=magnify_indexes;
if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
(fabs(intensity[3]-intensity[5]) < MagickEpsilon))
{
/*
Clone center pixel.
*/
*r=indexes[4];
r++;
*r=indexes[4];
r+=(magnify_image->columns-1);
*r=indexes[4];
r++;
*r=indexes[4];
}
else
{
/*
Selectively clone pixel.
*/
if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
*r=indexes[3];
else
*r=indexes[4];
r++;
if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
*r=indexes[5];
else
*r=indexes[4];
r+=(magnify_image->columns-1);
if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
*r=indexes[3];
else
*r=indexes[4];
r++;
if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
*r=indexes[5];
else
*r=indexes[4];
}
magnify_indexes+=2;
}
q+=2;
}
if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MagnifyImage)
#endif
proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
magnify_view=DestroyCacheView(magnify_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
magnify_image=DestroyImage(magnify_image);
return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: proportionally scale the image to half its size
    using the spline filter with no additional blur (blur factor 1.0).
    Returns a new image, or NULL on failure (errors in `exception').
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  return(ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,1.0,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resize image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"

  double
    source_x_density,
    source_y_density;

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Compute the pixel geometry that keeps the image the same physical size
    when rendered at the requested resolution.  A source resolution of 0
    is treated as 72 DPI.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_x_density=image->x_resolution == 0.0 ? 72.0 : image->x_resolution;
  source_y_density=image->y_resolution == 0.0 ? 72.0 : image->y_resolution;
  width=(size_t) (x_resolution*image->columns/source_x_density+0.5);
  height=(size_t) (y_resolution*image->rows/source_y_density+0.5);
  resample_image=ResizeImage(image,width,height,filter,blur,exception);
  if (resample_image != (Image *) NULL)
    {
      /* Record the new resolution on the result. */
      resample_image->x_resolution=x_resolution;
      resample_image->y_resolution=y_resolution;
    }
  return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged.  Otherwise the filter defaults to Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,
% const size_t rows,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp. Typically set
% this to 1.0.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
One entry of a resize filter's sampling window: the weight applied to a
single source pixel (a column in the horizontal pass, a row in the
vertical pass).
*/
typedef struct _ContributionInfo
{
MagickRealType
weight;  /* filter weight applied to this source pixel */
ssize_t
pixel;  /* source pixel index (column or row) this weight applies to */
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  ssize_t
    n;

  /*
    Release each per-thread contribution buffer, then the pointer table
    itself.  Safe on a partially-populated table (NULL slots are skipped),
    which lets AcquireContributionThreadSet() unwind mid-allocation.
  */
  assert(contribution != (ContributionInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (contribution[n] != (ContributionInfo *) NULL)
      contribution[n]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[n]);
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  ContributionInfo
    **contribution;

  size_t
    number_threads;

  ssize_t
    n;

  /*
    Allocate one aligned buffer of `count' ContributionInfo entries per
    worker thread.  The pointer table is zeroed first so that a failure
    partway through can be unwound by DestroyContributionThreadSet().
    Returns NULL on any allocation failure.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) ResetMagickMemory(contribution,0,number_threads*sizeof(
    *contribution));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    contribution[n]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[n] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}
/*
Resize `image' horizontally into `resize_image' by convolving each output
column with the resize filter.  `span' and `offset' feed the shared
progress monitor across both filter passes.  Returns MagickFalse on any
cache or allocation failure.
*/
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
const Image *image,Image *resize_image,const MagickRealType x_factor,
const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"
CacheView
*image_view,
*resize_view;
ClassType
storage_class;
ContributionInfo
**magick_restrict contributions;
MagickBooleanType
status;
MagickPixelPacket
zero;
MagickRealType
scale,
support;
ssize_t
x;
/*
Apply filter to resize horizontally from image to resize image.
*/
/* When shrinking (x_factor < 1) the filter window is widened by 1/x_factor
so every source pixel is covered. */
scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
support=scale*GetResizeFilterSupport(resize_filter);
storage_class=support > 0.5 ? DirectClass : image->storage_class;
if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
{
InheritException(exception,&resize_image->exception);
return(MagickFalse);
}
if (support < 0.5)
{
/*
Support too small even for nearest neighbour: Reduce to point
sampling.
*/
support=(MagickRealType) 0.5;
scale=1.0;
}
/* One contribution buffer per thread; 2*support+3 bounds the window size. */
contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
if (contributions == (ContributionInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
status=MagickTrue;
scale=PerceptibleReciprocal(scale);
(void) ResetMagickMemory(&zero,0,sizeof(zero));
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
const int
id = GetOpenMPThreadId();
MagickRealType
bisect,
density;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ContributionInfo
*magick_restrict contribution;
register IndexPacket
*magick_restrict resize_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
y;
ssize_t
n,
start,
stop;
if (status == MagickFalse)
continue;
/* bisect is the center of output column x mapped into source coordinates;
[start,stop) is the window of contributing source columns. */
bisect=(MagickRealType) (x+0.5)/x_factor+MagickEpsilon;
start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
density=0.0;
contribution=contributions[id];
for (n=0; n < (stop-start); n++)
{
contribution[n].pixel=start+n;
contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
((MagickRealType) (start+n)-bisect+0.5));
density+=contribution[n].weight;
}
if (n == 0)
continue;
if ((density != 0.0) && (density != 1.0))
{
register ssize_t
i;
/*
Normalize.
*/
density=PerceptibleReciprocal(density);
for (i=0; i < n; i++)
contribution[i].weight*=density;
}
/* Fetch the full-height strip of contributing source columns, and queue
the single output column x. */
p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
(contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
MagickPixelPacket
pixel;
MagickRealType
alpha;
register ssize_t
i;
ssize_t
j;
pixel=zero;
if (image->matte == MagickFalse)
{
/* Opaque image: plain weighted sum of the window pixels. */
for (i=0; i < n; i++)
{
/* j = offset of contributing pixel (row y, window column i) inside
the fetched strip, whose width is the window span. */
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i].pixel-contribution[0].pixel);
alpha=contribution[i].weight;
pixel.red+=alpha*GetPixelRed(p+j);
pixel.green+=alpha*GetPixelGreen(p+j);
pixel.blue+=alpha*GetPixelBlue(p+j);
pixel.opacity+=alpha*GetPixelOpacity(p+j);
}
SetPixelRed(q,ClampToQuantum(pixel.red));
SetPixelGreen(q,ClampToQuantum(pixel.green));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if ((image->colorspace == CMYKColorspace) &&
(resize_image->colorspace == CMYKColorspace))
{
for (i=0; i < n; i++)
{
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i].pixel-contribution[0].pixel);
alpha=contribution[i].weight;
pixel.index+=alpha*GetPixelIndex(indexes+j);
}
SetPixelIndex(resize_indexes+y,ClampToQuantum(pixel.index));
}
}
else
{
double
gamma;
/* Matte image: weight colors by alpha as well, then renormalize by
gamma (the accumulated alpha-weight) to avoid halos at edges. */
gamma=0.0;
for (i=0; i < n; i++)
{
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i].pixel-contribution[0].pixel);
alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
pixel.red+=alpha*GetPixelRed(p+j);
pixel.green+=alpha*GetPixelGreen(p+j);
pixel.blue+=alpha*GetPixelBlue(p+j);
pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if ((image->colorspace == CMYKColorspace) &&
(resize_image->colorspace == CMYKColorspace))
{
for (i=0; i < n; i++)
{
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i].pixel-contribution[0].pixel);
alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
pixel.index+=alpha*GetPixelIndex(indexes+j);
}
SetPixelIndex(resize_indexes+y,ClampToQuantum(gamma*pixel.index));
}
}
if ((resize_image->storage_class == PseudoClass) &&
(image->storage_class == PseudoClass))
{
/* Colormapped output: take the index of the source pixel nearest the
window center instead of blending indexes. */
i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-
1.0)+0.5);
j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[i-start].pixel-contribution[0].pixel);
SetPixelIndex(resize_indexes+y,GetPixelIndex(indexes+j));
}
q++;
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* *offset is shared with the other filter pass; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_HorizontalFilter)
#endif
proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
contributions=DestroyContributionThreadSet(contributions);
return(status);
}
/*
Resize `image' vertically into `resize_image' by convolving each output
row with the resize filter.  Mirror of HorizontalFilter(), operating on
rows instead of columns; `span' and `offset' feed the shared progress
monitor across both passes.
*/
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
const Image *image,Image *resize_image,const MagickRealType y_factor,
const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
CacheView
*image_view,
*resize_view;
ClassType
storage_class;
ContributionInfo
**magick_restrict contributions;
MagickBooleanType
status;
MagickPixelPacket
zero;
MagickRealType
scale,
support;
ssize_t
y;
/*
Apply filter to resize vertically from image to resize image.
*/
/* When shrinking (y_factor < 1) the filter window is widened by 1/y_factor
so every source row is covered. */
scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
support=scale*GetResizeFilterSupport(resize_filter);
storage_class=support > 0.5 ? DirectClass : image->storage_class;
if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
{
InheritException(exception,&resize_image->exception);
return(MagickFalse);
}
if (support < 0.5)
{
/*
Support too small even for nearest neighbour: Reduce to point
sampling.
*/
support=(MagickRealType) 0.5;
scale=1.0;
}
/* One contribution buffer per thread; 2*support+3 bounds the window size. */
contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
if (contributions == (ContributionInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
status=MagickTrue;
scale=PerceptibleReciprocal(scale);
(void) ResetMagickMemory(&zero,0,sizeof(zero));
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickRealType
bisect,
density;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ContributionInfo
*magick_restrict contribution;
register IndexPacket
*magick_restrict resize_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
n,
start,
stop;
if (status == MagickFalse)
continue;
/* bisect is the center of output row y mapped into source coordinates;
[start,stop) is the window of contributing source rows. */
bisect=(MagickRealType) (y+0.5)/y_factor+MagickEpsilon;
start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
density=0.0;
contribution=contributions[id];
for (n=0; n < (stop-start); n++)
{
contribution[n].pixel=start+n;
contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
((MagickRealType) (start+n)-bisect+0.5));
density+=contribution[n].weight;
}
if (n == 0)
continue;
if ((density != 0.0) && (density != 1.0))
{
register ssize_t
i;
/*
Normalize.
*/
density=PerceptibleReciprocal(density);
for (i=0; i < n; i++)
contribution[i].weight*=density;
}
/* Fetch the full-width band of contributing source rows, and queue the
single output row y. */
p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
exception);
q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
MagickPixelPacket
pixel;
MagickRealType
alpha;
register ssize_t
i;
ssize_t
j;
pixel=zero;
if (image->matte == MagickFalse)
{
/* Opaque image: plain weighted sum of the window pixels. */
for (i=0; i < n; i++)
{
/* j = offset of contributing pixel (window row i, column x) inside
the fetched band. */
j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[i].weight;
pixel.red+=alpha*GetPixelRed(p+j);
pixel.green+=alpha*GetPixelGreen(p+j);
pixel.blue+=alpha*GetPixelBlue(p+j);
pixel.opacity+=alpha*GetPixelOpacity(p+j);
}
SetPixelRed(q,ClampToQuantum(pixel.red));
SetPixelGreen(q,ClampToQuantum(pixel.green));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if ((image->colorspace == CMYKColorspace) &&
(resize_image->colorspace == CMYKColorspace))
{
for (i=0; i < n; i++)
{
j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[i].weight;
pixel.index+=alpha*GetPixelIndex(indexes+j);
}
SetPixelIndex(resize_indexes+x,ClampToQuantum(pixel.index));
}
}
else
{
double
gamma;
/* Matte image: weight colors by alpha as well, then renormalize by
gamma (the accumulated alpha-weight) to avoid halos at edges. */
gamma=0.0;
for (i=0; i < n; i++)
{
j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
pixel.red+=alpha*GetPixelRed(p+j);
pixel.green+=alpha*GetPixelGreen(p+j);
pixel.blue+=alpha*GetPixelBlue(p+j);
pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if ((image->colorspace == CMYKColorspace) &&
(resize_image->colorspace == CMYKColorspace))
{
for (i=0; i < n; i++)
{
j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
pixel.index+=alpha*GetPixelIndex(indexes+j);
}
SetPixelIndex(resize_indexes+x,ClampToQuantum(gamma*pixel.index));
}
}
if ((resize_image->storage_class == PseudoClass) &&
(image->storage_class == PseudoClass))
{
/* Colormapped output: take the index of the source pixel nearest the
window center instead of blending indexes. */
i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-
1.0)+0.5);
j=(ssize_t) ((contribution[i-start].pixel-contribution[0].pixel)*
image->columns+x);
SetPixelIndex(resize_indexes+x,GetPixelIndex(indexes+j));
}
q++;
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* *offset is shared with the other filter pass; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_VerticalFilter)
#endif
proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
contributions=DestroyContributionThreadSet(contributions);
return(status);
}
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
const size_t rows,const FilterTypes filter,const double blur,
ExceptionInfo *exception)
{
FilterTypes
filter_type;
Image
*filter_image,
*resize_image;
MagickOffsetType
offset;
MagickRealType
x_factor,
y_factor;
MagickSizeType
span;
MagickStatusType
status;
ResizeFilter
*resize_filter;
/*
Acquire resize image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
/* Identity resize with default filter settings: just clone. */
if ((columns == image->columns) && (rows == image->rows) &&
(filter == UndefinedFilter) && (blur == 1.0))
return(CloneImage(image,0,0,MagickTrue,exception));
/*
Acquire resize filter.
*/
x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
/* Default filter selection (see the method comment): Lanczos in general,
Point for a no-op scale, Mitchell for colormapped/matte/enlarging cases. */
filter_type=LanczosFilter;
if (filter != UndefinedFilter)
filter_type=filter;
else
if ((x_factor == 1.0) && (y_factor == 1.0))
filter_type=PointFilter;
else
if ((image->storage_class == PseudoClass) ||
(image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0))
filter_type=MitchellFilter;
resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse,
exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/* Try the OpenCL-accelerated path first; fall through on failure. */
resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
exception);
if (resize_image != NULL)
{
resize_filter=DestroyResizeFilter(resize_filter);
return(resize_image);
}
#endif
resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (resize_image == (Image *) NULL)
{
resize_filter=DestroyResizeFilter(resize_filter);
return(resize_image);
}
/* Intermediate image for the two-pass resize: already at the final size
along the axis that is filtered first. */
if (x_factor > y_factor)
filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
else
filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
if (filter_image == (Image *) NULL)
{
resize_filter=DestroyResizeFilter(resize_filter);
return(DestroyImage(resize_image));
}
/*
Resize image.
*/
/* span = total scanlines processed across both passes (for progress). */
offset=0;
if (x_factor > y_factor)
{
span=(MagickSizeType) (filter_image->columns+rows);
status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
&offset,exception);
status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
span,&offset,exception);
}
else
{
span=(MagickSizeType) (filter_image->rows+columns);
status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
&offset,exception);
status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
span,&offset,exception);
}
/*
Free resources.
*/
filter_image=DestroyImage(filter_image);
resize_filter=DestroyResizeFilter(resize_filter);
if (status == MagickFalse)
{
resize_image=DestroyImage(resize_image);
return((Image *) NULL);
}
resize_image->type=image->type;
return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    sample_offset;

  register ssize_t
    x;

  ssize_t
    *x_offset,
    y;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Check for a possible user-defined sampling offset artifact.  The default
    sampling offset is the mid-point of each sample region.
  */
  sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
  {
    const char
      *value;

    value=GetImageArtifact(image,"sample:offset");
    if (value != (char *) NULL)
      {
        GeometryInfo
          geometry_info;

        MagickStatusType
          flags;

        /*
          Offsets are percentages of the sample region; a single value
          applies to both axes, a second (sigma) overrides the y offset.
          Fix: ParseGeometry() was previously called twice with identical
          arguments; one call suffices.
        */
        flags=ParseGeometry(value,&geometry_info);
        sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
        if ((flags & SigmaValue) != 0)
          sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
      }
  }
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Precompute, once, which source column each destination column samples. */
  for (x=0; x < (ssize_t) sample_image->columns; x++)
    x_offset[x]=(ssize_t) ((((double) x+sample_offset.x)*image->columns)/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict sample_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    /* Source row this destination row samples from. */
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Fix: image_view is a virtual cache view whose pixels were fetched with
      GetCacheViewVirtualPixels(), so its index queue must be obtained with
      the virtual accessor, not the authentic one.
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    sample_indexes=GetCacheViewAuthenticIndexQueue(sample_view);
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
      *q++=p[x_offset[x]];
    if ((image->storage_class == PseudoClass) ||
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) sample_image->columns; x++)
        SetPixelIndex(sample_indexes+x,GetPixelIndex(indexes+x_offset[x]));
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SampleImage)
#endif
        proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  MagickPixelPacket
    pixel,
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector,
    zero;

  MagickRealType
    alpha;

  PointInfo
    scale,   /* fraction of a source row/column contributing to the current target cell */
    span;    /* fraction of the current target row/column still to be filled */

  register ssize_t
    i;

  ssize_t
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Identity scaling: just clone. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&scale_image->exception);
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  When the row count is unchanged, scanline aliases
    x_vector so no separate buffer (and no separate free) is needed.
  */
  x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
      sizeof(*scanline));
  scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
    scale_image->columns,sizeof(*scale_scanline));
  y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*y_vector));
  if ((scanline == (MagickPixelPacket *) NULL) ||
      (scale_scanline == (MagickPixelPacket *) NULL) ||
      (x_vector == (MagickPixelPacket *) NULL) ||
      (y_vector == (MagickPixelPacket *) NULL))
    {
      /* Free scanline only when it is a distinct allocation (rows differ);
         otherwise it aliases x_vector and freeing both would double-free. */
      if ((image->rows != scale_image->rows) &&
          (scanline != (MagickPixelPacket *) NULL))
        scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (MagickPixelPacket *) NULL)
        scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(
          scale_scanline);
      if (x_vector != (MagickPixelPacket *) NULL)
        x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
      if (y_vector != (MagickPixelPacket *) NULL)
        y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) ResetMagickMemory(y_vector,0,(size_t) image->columns*
    sizeof(*y_vector));
  GetMagickPixelPacket(image,&pixel);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  i=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict scale_indexes;

    register MagickPixelPacket
      *magick_restrict s,
      *magick_restrict t;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    scale_indexes=GetCacheViewAuthenticIndexQueue(scale_view);
    if (scale_image->rows == image->rows)
      {
        /*
          No vertical scaling: read one source row straight into x_vector.
          Channel values are stored alpha-premultiplied (alpha*channel) so
          the averaging below weights by coverage correctly.
        */
        p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
          exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            break;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(p);
          x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
          x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
          x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
          if (image->matte != MagickFalse)
            x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
          if (indexes != (IndexPacket *) NULL)
            x_vector[x].index=(MagickRealType) (alpha*GetPixelIndex(indexes+x));
          p++;
        }
      }
    else
      {
        /*
          Scale Y direction: accumulate fractional source-row contributions
          into y_vector until one full target row's worth has been gathered.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
                exception);
              if (p == (const PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (image->matte != MagickFalse)
                  alpha=QuantumScale*GetPixelAlpha(p);
                x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
                x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
                x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
                if (image->matte != MagickFalse)
                  x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
                if (indexes != (IndexPacket *) NULL)
                  x_vector[x].index=(MagickRealType) (alpha*
                    GetPixelIndex(indexes+x));
                p++;
              }
              number_rows++;
            }
          /* This source row contributes entirely to the current target row. */
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            y_vector[x].red+=scale.y*x_vector[x].red;
            y_vector[x].green+=scale.y*x_vector[x].green;
            y_vector[x].blue+=scale.y*x_vector[x].blue;
            if (scale_image->matte != MagickFalse)
              y_vector[x].opacity+=scale.y*x_vector[x].opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              y_vector[x].index+=scale.y*x_vector[x].index;
          }
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
              exception);
            if (p == (const PixelPacket *) NULL)
              {
                status=MagickFalse;
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (image->matte != MagickFalse)
                alpha=QuantumScale*GetPixelAlpha(p);
              x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
              x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
              x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
              if (image->matte != MagickFalse)
                x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
              if (indexes != (IndexPacket *) NULL)
                x_vector[x].index=(MagickRealType) (alpha*
                  GetPixelIndex(indexes+x));
              p++;
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /* Emit the accumulated row plus the remaining fractional part of the
           current source row, then reset y_vector for the next target row. */
        s=scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.red=y_vector[x].red+span.y*x_vector[x].red;
          pixel.green=y_vector[x].green+span.y*x_vector[x].green;
          pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue;
          if (image->matte != MagickFalse)
            pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            pixel.index=y_vector[x].index+span.y*x_vector[x].index;
          s->red=pixel.red;
          s->green=pixel.green;
          s->blue=pixel.blue;
          if (scale_image->matte != MagickFalse)
            s->opacity=pixel.opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            s->index=pixel.index;
          s++;
          y_vector[x]=zero;
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.  alpha un-premultiplies the
          channel values accumulated above (PerceptibleReciprocal avoids
          division by a near-zero alpha).
        */
        s=scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(s);
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*s->red));
          SetPixelGreen(q,ClampToQuantum(alpha*s->green));
          SetPixelBlue(q,ClampToQuantum(alpha*s->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(s->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*s->index));
          q++;
          s++;
        }
      }
    else
      {
        /*
          Scale X direction: same fractional-coverage accumulation as the
          Y pass, walking source columns (s) and target columns (t).
        */
        pixel=zero;
        next_column=MagickFalse;
        span.x=1.0;
        s=scanline;
        t=scale_scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                pixel=zero;
                t++;
              }
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  pixel=zero;
                  next_column=MagickFalse;
                  t++;
                }
              pixel.red+=scale.x*s->red;
              pixel.green+=scale.x*s->green;
              pixel.blue+=scale.x*s->blue;
              if (scale_image->matte != MagickFalse)
                pixel.opacity+=scale.x*s->opacity;
              if (scale_indexes != (IndexPacket *) NULL)
                pixel.index+=scale.x*s->index;
              span.x-=scale.x;
            }
          s++;
        }
        /* Fold any remaining fractional span from the last source column. */
        if (span.x > 0)
          {
            s--;
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (scale_image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
          }
        /* Flush the final partially-filled target column, if any. */
        if ((next_column == MagickFalse) &&
            ((ssize_t) (t-scale_scanline) < (ssize_t) scale_image->columns))
          {
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
          }
        /*
          Transfer scanline to scaled image (un-premultiplying as above).
        */
        t=scale_scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(t);
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*t->red));
          SetPixelGreen(q,ClampToQuantum(alpha*t->green));
          SetPixelBlue(q,ClampToQuantum(alpha*t->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(t->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*t->index));
          t++;
          q++;
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.  scanline is freed only when it was a distinct
    allocation (rows differ); otherwise it aliases x_vector.
  */
  y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
  scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
  x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5

  char
    *url,
    value[MaxTextExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Choose a resize strategy: resize directly for modest reductions (or when
    the intermediate sample would be tiny); otherwise point-sample down first
    to cut the cost of the high-quality resize.
  */
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,image->blur,
      exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,
        image->blur,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          image->blur,exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        /* Deleting invalidates the profile iterator; restart the scan. */
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Record the freedesktop.org thumbnail properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MaxTextExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  /*
    Bug fix: a second, unconditional FormatLocaleString() of
    attributes.st_mtime previously appeared here.  It read an uninitialized
    struct stat whenever GetPathAttributes() failed, and its result was dead
    (immediately overwritten by FormatMagickSize() below), so it has been
    removed.
  */
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,value);
  (void) ConcatenateMagickString(value,"B",MaxTextExtent);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatLocaleString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  url=GetMagickHomeURL();
  (void) SetImageProperty(thumbnail_image,"software",url);
  url=DestroyString(url);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
  return(thumbnail_image);
}
|
bitset.h | /** Copyright 2020 Alibaba Group Holding Limited.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef GRAPE_UTILS_BITSET_H_
#define GRAPE_UTILS_BITSET_H_
#include <stdlib.h>
#include <algorithm>
#include <utility>
#define WORD_SIZE(n) (((n) + 63ul) >> 6)
#define WORD_INDEX(i) ((i) >> 6)
#define BIT_OFFSET(i) ((i) &0x3f)
#define ROUND_UP(i) (((i) + 63ul) & (~63ul))
#define ROUND_DOWN(i) ((i) & (~63ul))
namespace grape {
/**
* @brief Bitset is a highly-optimized bitset implementation.
*/
class Bitset {
 public:
  // Empty bitset; init() must be called before any bit operation.
  Bitset() : data_(NULL), size_(0), size_in_words_(0) {}
  // Allocate storage for `size` bits, all cleared to zero.
  explicit Bitset(size_t size) : size_(size) {
    size_in_words_ = WORD_SIZE(size_);
    data_ = static_cast<uint64_t*>(malloc(size_in_words_ * sizeof(uint64_t)));
    clear();
  }
  // NOTE(review): the class owns a raw malloc'd buffer but declares no copy
  // constructor/assignment -- copying a Bitset would double-free data_.
  // Verify callers never copy (or add deleted copy operations).
  ~Bitset() {
    if (data_ != NULL) {
      free(data_);
    }
  }
  // (Re)allocate storage for `size` bits, discarding previous contents and
  // clearing all bits.
  void init(size_t size) {
    if (data_ != NULL) {
      free(data_);
    }
    size_ = size;
    size_in_words_ = WORD_SIZE(size_);
    data_ = static_cast<uint64_t*>(malloc(size_in_words_ * sizeof(uint64_t)));
    clear();
  }
  // Set every bit to zero (sequential).
  void clear() {
    for (size_t i = 0; i < size_in_words_; ++i) {
      data_[i] = 0;
    }
  }
  // Set every bit to zero using `thread_num` OpenMP threads.
  void parallel_clear(int thread_num) {
#pragma omp parallel for num_threads(thread_num)
    for (size_t i = 0; i < size_in_words_; ++i) {
      data_[i] = 0;
    }
  }
  // True iff no bit is set.
  bool empty() const {
    for (size_t i = 0; i < size_in_words_; ++i) {
      if (data_[i]) {
        return false;
      }
    }
    return true;
  }
  // True iff no bit in [begin, end) is set; end is clamped to size_.
  // Whole words inside the range are tested directly; the partial words at
  // either edge are masked down to just the in-range bits.
  bool partial_empty(size_t begin, size_t end) const {
    end = std::min(end, size_);
    size_t cont_beg = ROUND_UP(begin);    // first 64-aligned bit >= begin
    size_t cont_end = ROUND_DOWN(end);    // last 64-aligned bit <= end
    size_t word_beg = WORD_INDEX(cont_beg);
    size_t word_end = WORD_INDEX(cont_end);
    for (size_t i = word_beg; i < word_end; ++i) {
      if (data_[i]) {
        return false;
      }
    }
    if (cont_beg != begin) {
      // Bits begin..cont_beg-1 occupy the top (cont_beg - begin) bits of
      // their word; shift them down and test.
      uint64_t first_word = data_[WORD_INDEX(begin)];
      first_word = (first_word >> (64 - (cont_beg - begin)));
      if (first_word) {
        return false;
      }
    }
    if (cont_end != end) {
      // Bits cont_end..end-1 occupy the low (end - cont_end) bits.
      uint64_t last_word = data_[WORD_INDEX(end)];
      last_word = (last_word & ((1ul << (end - cont_end)) - 1));
      if (last_word) {
        return false;
      }
    }
    return true;
  }
  bool get_bit(size_t i) const {
    return data_[WORD_INDEX(i)] & (1ul << BIT_OFFSET(i));
  }
  // Atomically set bit i (gcc __sync builtin).
  void set_bit(size_t i) {
    __sync_fetch_and_or(data_ + WORD_INDEX(i), 1ul << BIT_OFFSET(i));
  }
  // Atomically set bit i; returns true iff the bit was previously unset.
  bool set_bit_with_ret(size_t i) {
    uint64_t mask = 1ul << BIT_OFFSET(i);
    uint64_t ret = __sync_fetch_and_or(data_ + WORD_INDEX(i), mask);
    return !(ret & mask);
  }
  // Atomically clear bit i.
  void reset_bit(size_t i) {
    __sync_fetch_and_and(data_ + WORD_INDEX(i), ~(1ul << BIT_OFFSET(i)));
  }
  // Atomically clear bit i; returns true iff the bit was previously set.
  bool reset_bit_with_ret(size_t i) {
    uint64_t mask = 1ul << BIT_OFFSET(i);
    uint64_t ret = __sync_fetch_and_and(data_ + WORD_INDEX(i), ~mask);
    return (ret & mask);
  }
  void swap(Bitset& other) {
    std::swap(size_, other.size_);
    std::swap(data_, other.data_);
    std::swap(size_in_words_, other.size_in_words_);
  }
  // Number of set bits (popcount over all words).
  size_t count() const {
    size_t ret = 0;
    for (size_t i = 0; i < size_in_words_; ++i) {
      ret += __builtin_popcountll(data_[i]);
    }
    return ret;
  }
  // Parallel popcount using an OpenMP sum reduction.
  size_t parallel_count(int thread_num) const {
    size_t ret = 0;
#pragma omp parallel for num_threads(thread_num) reduction(+ : ret)
    for (size_t i = 0; i < size_in_words_; ++i) {
      ret += __builtin_popcountll(data_[i]);
    }
    return ret;
  }
  // Number of set bits in [begin, end); same edge-word masking scheme as
  // partial_empty().  NOTE(review): unlike partial_empty(), end is not
  // clamped to size_ here -- confirm callers pass end <= size_.
  size_t partial_count(size_t begin, size_t end) const {
    size_t ret = 0;
    size_t cont_beg = ROUND_UP(begin);
    size_t cont_end = ROUND_DOWN(end);
    size_t word_beg = WORD_INDEX(cont_beg);
    size_t word_end = WORD_INDEX(cont_end);
    for (size_t i = word_beg; i < word_end; ++i) {
      ret += __builtin_popcountll(data_[i]);
    }
    if (cont_beg != begin) {
      uint64_t first_word = data_[WORD_INDEX(begin)];
      first_word = (first_word >> (64 - (cont_beg - begin)));
      ret += __builtin_popcountll(first_word);
    }
    if (cont_end != end) {
      uint64_t last_word = data_[WORD_INDEX(end)];
      last_word = (last_word & ((1ul << (end - cont_end)) - 1));
      ret += __builtin_popcountll(last_word);
    }
    return ret;
  }
  // Parallel variant of partial_count(); only the whole-word middle section
  // is parallelized.
  size_t parallel_partial_count(int thread_num, size_t begin,
                                size_t end) const {
    size_t ret = 0;
    size_t cont_beg = ROUND_UP(begin);
    size_t cont_end = ROUND_DOWN(end);
    size_t word_beg = WORD_INDEX(cont_beg);
    size_t word_end = WORD_INDEX(cont_end);
#pragma omp parallel for num_threads(thread_num) reduction(+ : ret)
    for (size_t i = word_beg; i < word_end; ++i) {
      ret += __builtin_popcountll(data_[i]);
    }
    if (cont_beg != begin) {
      uint64_t first_word = data_[WORD_INDEX(begin)];
      first_word = (first_word >> (64 - (cont_beg - begin)));
      ret += __builtin_popcountll(first_word);
    }
    if (cont_end != end) {
      uint64_t last_word = data_[WORD_INDEX(end)];
      last_word = (last_word & ((1ul << (end - cont_end)) - 1));
      ret += __builtin_popcountll(last_word);
    }
    return ret;
  }
  // Word containing bit i / pointer to that word.
  inline uint64_t get_word(size_t i) const { return data_[WORD_INDEX(i)]; }
  inline const uint64_t* get_word_ptr(size_t i) const {
    return &data_[WORD_INDEX(i)];
  }

 private:
  uint64_t* data_;
  size_t size_;
  size_t size_in_words_;
};
// Non-owning view over a caller-supplied, 64-bit-word-aligned bit buffer.
// The constructor MUTATES the buffer: bits outside [b, e) in the first and
// last covered words are cleared so whole-word scans see only in-range bits.
class RefBitset {
 public:
  RefBitset() : data(NULL) {}
  // d: word buffer; [b, e): bit range (absolute bit positions).
  RefBitset(void* d, size_t b, size_t e) {
    data = static_cast<uint64_t*>(d);
    begin = b / 64 * 64;          // round b down to a word boundary
    end = (e + 63) / 64 * 64;     // round e up to a word boundary
    // Clear bits below b in the first word.  When b is 64-aligned the mask
    // is ~0 and this is a no-op.
    data[0] &= (~((1ul << (b - begin)) - 1ul));
    // Clear bits at or above e in the last word.  Bug fix: when e is a
    // multiple of 64, end == e and the old unconditional mask computed
    // 1ul << 64 -- undefined behavior in C++.  No masking is needed then.
    if (end != e) {
      data[((end - begin) / 64) - 1] &= ((1ul << (64 - (end - e))) - 1ul);
    }
  }
  ~RefBitset() {}
  // Test absolute bit position `loc` (begin is word-aligned, so the bit
  // offset within the word is the same for loc and loc - begin).
  bool get_bit(size_t loc) const {
    return data[WORD_INDEX(loc - begin)] & (1ul << BIT_OFFSET(loc));
  }
  uint64_t get_word_by_index(size_t index) { return data[index]; }
  size_t get_word_num() const { return (end - begin) / 64; }

  uint64_t* data;
  size_t begin;   // first covered bit, rounded down to a word boundary
  size_t end;     // one past the last covered bit, rounded up to a boundary
};
#undef WORD_SIZE
#undef WORD_INDEX
#undef BIT_OFFSET
#undef ROUND_UP
#undef ROUND_DOWN
} // namespace grape
#endif // GRAPE_UTILS_BITSET_H_
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store *x - *y into *result (the classic glibc elapsed-time idiom).
 * NOTE: *y is normalized in place as scratch storage, so the caller must
 * treat it as clobbered.  Returns 1 when the difference is negative,
 * 0 otherwise; result->tv_usec is always non-negative on return. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so that y->tv_usec <= x->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry in the other direction when the microsecond gap exceeds 1s. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* The microsecond difference is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative overall difference shows up purely in the seconds field. */
  return x->tv_sec < y->tv_sec;
}
/* Order-1 3D 7-point stencil benchmark driver: allocates a double-buffered
 * Nz x Ny x Nx grid, runs the PLUTO/CLooG-tiled time loop TESTS times, and
 * reports the best wall-clock time. */
int main(int argc, char *argv[])
{
	int t, i, j, k, test;
	/* Bug fix: these were previously uninitialized unless enough command-line
	 * arguments were given, making the allocation sizes and loop bounds
	 * undefined behavior.  Provide sane defaults instead. */
	int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
	if (argc > 3) {
		Nx = atoi(argv[1])+2;
		Ny = atoi(argv[2])+2;
		Nz = atoi(argv[3])+2;
	}
	if (argc > 4)
		Nt = atoi(argv[4]);

	/* Two time planes (A[0], A[1]) of an Nz x Ny x Nx grid. */
	double ****A = (double ****) malloc(sizeof(double***)*2);
	A[0] = (double ***) malloc(sizeof(double**)*Nz);
	A[1] = (double ***) malloc(sizeof(double**)*Nz);
	for(i=0; i<Nz; i++){
		A[0][i] = (double**) malloc(sizeof(double*)*Ny);
		A[1][i] = (double**) malloc(sizeof(double*)*Ny);
		for(j=0;j<Ny;j++){
			A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
			A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
		}
	}

	// tile size information, including extra element to decide the list length
	int *tile_size = (int*) malloc(sizeof(int));
	tile_size[0] = -1;
	// The list is modified here before source-to-source transformations
	tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
	tile_size[0] = 4;
	tile_size[1] = 4;
	tile_size[2] = 16;
	tile_size[3] = 64;
	tile_size[4] = -1;

	// for timekeeping
	int ts_return = -1;
	struct timeval start, end, result;
	double tdiff = 0.0, min_tdiff=1.e100;

	const int BASE = 1024;
	const double alpha = 0.0876;
	const double beta = 0.0765;

	/* Initialize the grid.  Bug fix: the loops previously started at index 1,
	 * leaving plane/row/column 0 of A[0] -- and all halo cells of A[1] --
	 * uninitialized even though the stencil reads them (A[...][i-1] etc.),
	 * i.e. reads of indeterminate values.  Initialize the full extent of
	 * both buffers. */
	srand(42);
	for (i = 0; i < Nz; i++) {
		for (j = 0; j < Ny; j++) {
			for (k = 0; k < Nx; k++) {
				A[0][i][j][k] = 1.0 * (rand() % BASE);
				A[1][i][j][k] = 0.0;
			}
		}
	}

#ifdef LIKWID_PERFMON
	LIKWID_MARKER_INIT;
#pragma omp parallel
	{
		LIKWID_MARKER_THREADINIT;
#pragma omp barrier
		LIKWID_MARKER_START("calc");
	}
#endif

	int num_threads = 1;
#if defined(_OPENMP)
	num_threads = omp_get_max_threads();
#endif

	for(test=0; test<TESTS; test++){
		gettimeofday(&start, 0);
		// serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.
   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.
   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0. */
/* We do not support C11 <threads.h>. */
		int t1, t2, t3, t4, t5, t6, t7, t8;
		int lb, ub, lbp, ubp, lb2, ub2;
		register int lbv, ubv;
		/* Start of CLooG code -- machine-generated tiled time-skewed loop
		 * nest; do not edit by hand. */
		if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
			for (t1=-1;t1<=floord(Nt-2,2);t1++) {
				lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
				ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
				for (t2=lbp;t2<=ubp;t2++) {
					for (t3=max(max(0,ceild(t1-7,8)),ceild(4*t2-Nz-12,16));t3<=min(min(min(floord(4*t2+Ny,16),floord(Nt+Ny-4,16)),floord(2*t1+Ny+1,16)),floord(4*t1-4*t2+Nz+Ny-1,16));t3++) {
						for (t4=max(max(max(0,ceild(t1-31,32)),ceild(4*t2-Nz-60,64)),ceild(16*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t2+Nx,64),floord(Nt+Nx-4,64)),floord(2*t1+Nx+1,64)),floord(16*t3+Nx+12,64)),floord(4*t1-4*t2+Nz+Nx-1,64));t4++) {
							for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),16*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),16*t3+14),64*t4+62),4*t1-4*t2+Nz+1);t5++) {
								for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
									for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
										lbv=max(64*t4,t5+1);
										ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
										for (t8=lbv;t8<=ubv;t8++) {
											A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
										}
									}
								}
							}
						}
					}
				}
			}
		}
		/* End of CLooG code */
		gettimeofday(&end, 0);
		ts_return = timeval_subtract(&result, &end, &start);
		tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
		min_tdiff = min(min_tdiff, tdiff);
		printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
	}

	PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
	{
		LIKWID_MARKER_STOP("calc");
	}
	LIKWID_MARKER_CLOSE;
#endif

	// Free allocated arrays (Causing performance degradation
/*	for(i=0; i<Nz; i++){
		for(j=0;j<Ny;j++){
			free(A[0][i][j]);
			free(A[1][i][j]);
		}
		free(A[0][i]);
		free(A[1][i]);
	}
	free(A[0]);
	free(A[1]);
*/
	return 0;
}
|
convolution_3x3_pack4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd64_transform_kernel_pack4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = 4b-4a-inch/4a-64-outch/4b;
kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack4.channel(q / 8);
for (int k = 0; k < 64; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00[8] = (__fp16)k01[k];
g00[9] = (__fp16)k11[k];
g00[10] = (__fp16)k21[k];
g00[11] = (__fp16)k31[k];
g00[12] = (__fp16)k41[k];
g00[13] = (__fp16)k51[k];
g00[14] = (__fp16)k61[k];
g00[15] = (__fp16)k71[k];
g00[16] = (__fp16)k02[k];
g00[17] = (__fp16)k12[k];
g00[18] = (__fp16)k22[k];
g00[19] = (__fp16)k32[k];
g00[20] = (__fp16)k42[k];
g00[21] = (__fp16)k52[k];
g00[22] = (__fp16)k62[k];
g00[23] = (__fp16)k72[k];
g00[24] = (__fp16)k03[k];
g00[25] = (__fp16)k13[k];
g00[26] = (__fp16)k23[k];
g00[27] = (__fp16)k33[k];
g00[28] = (__fp16)k43[k];
g00[29] = (__fp16)k53[k];
g00[30] = (__fp16)k63[k];
g00[31] = (__fp16)k73[k];
g00 += 32;
}
}
}
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
for (int k = 0; k < 64; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k01[k];
g00[5] = (__fp16)k11[k];
g00[6] = (__fp16)k21[k];
g00[7] = (__fp16)k31[k];
g00[8] = (__fp16)k02[k];
g00[9] = (__fp16)k12[k];
g00[10] = (__fp16)k22[k];
g00[11] = (__fp16)k32[k];
g00[12] = (__fp16)k03[k];
g00[13] = (__fp16)k13[k];
g00[14] = (__fp16)k23[k];
g00[15] = (__fp16)k33[k];
g00 += 16;
}
}
}
}
// Winograd F(6x6, 3x3) convolution, fp16 storage + fp16 arithmetic ("fp16sa"),
// pack4 channel layout (4 channels interleaved per element).
// Pipeline: pad input -> 8x8 input transform -> batched dot product against the
// pre-transformed kernel (kernel_tm) -> 6x6 output transform (+bias) -> cut padding.
// kernel_tm is expected in the interleaved layout produced by the matching
// transform_kernel routine (two output channels packed per kernel_tm channel)
// — NOTE(review): assumed from the channel(pp)/channel(p/2 + p%2) indexing below; confirm against the producer.
static void conv3x3s1_winograd64_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
//size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack; // presumably always 4 here (pack4 path) — TODO confirm at call site
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
// Output is rounded up to a multiple of the 6x6 tile; the input then needs
// outw+2 / outh+2 (3x3 kernel, stride 1) and is zero-padded to that size.
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias; // raw bias pointer; re-read as __fp16* in the output transform
// BEGIN transform input
Mat bottom_blob_tm;
{
// Each 8x8 input patch (stride 6, overlapping by 2) becomes one tile;
// bottom_blob_tm holds, per input channel, 64 rows of `tiles` pack4 elements.
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// Scratch for the separable transform: rows pass writes tmp[row][col],
// columns pass reads tmp[m][0..7]. Innermost [4] is the pack4 lane.
__fp16 tmp[8][8][4];
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 4;
// First pass: transform each of the 8 rows of the patch.
for (int m = 0; m < 8; m++)
{
float16x4_t _r00 = vld1_f16(r0);
float16x4_t _r01 = vld1_f16(r0 + 4);
float16x4_t _r02 = vld1_f16(r0 + 8);
float16x4_t _r03 = vld1_f16(r0 + 12);
float16x4_t _r04 = vld1_f16(r0 + 16);
float16x4_t _r05 = vld1_f16(r0 + 20);
float16x4_t _r06 = vld1_f16(r0 + 24);
float16x4_t _r07 = vld1_f16(r0 + 28);
float16x4_t _tmp0m = vfma_n_f16(vsub_f16(_r00, _r06), vsub_f16(_r04, _r02), 5.25f);
float16x4_t _tmp7m = vfma_n_f16(vsub_f16(_r07, _r01), vsub_f16(_r03, _r05), 5.25f);
vst1_f16(tmp[0][m], _tmp0m);
vst1_f16(tmp[7][m], _tmp7m);
// tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
// tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_r02, _r06), _r04, 4.25f);
float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_r01, _r05), _r03, 4.25f);
// float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
// float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
float16x4_t _tmp1m = vadd_f16(_tmp12a, _tmp12b);
float16x4_t _tmp2m = vsub_f16(_tmp12a, _tmp12b);
vst1_f16(tmp[1][m], _tmp1m);
vst1_f16(tmp[2][m], _tmp2m);
// tmp[1][m] = tmp12a + tmp12b;
// tmp[2][m] = tmp12a - tmp12b;
float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
float16x4_t _tmp34b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
// float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
// float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
float16x4_t _tmp3m = vadd_f16(_tmp34a, _tmp34b);
float16x4_t _tmp4m = vsub_f16(_tmp34a, _tmp34b);
vst1_f16(tmp[3][m], _tmp3m);
vst1_f16(tmp[4][m], _tmp4m);
// tmp[3][m] = tmp34a + tmp34b;
// tmp[4][m] = tmp34a - tmp34b;
float16x4_t _tmp56a = vfma_n_f16(_r06, vfms_n_f16(_r02, _r04, 1.25f), 4.f);
float16x4_t _tmp56b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
// float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
// float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
float16x4_t _tmp5m = vadd_f16(_tmp56a, _tmp56b);
float16x4_t _tmp6m = vsub_f16(_tmp56a, _tmp56b);
vst1_f16(tmp[5][m], _tmp5m);
vst1_f16(tmp[6][m], _tmp6m);
// tmp[5][m] = tmp56a + tmp56b;
// tmp[6][m] = tmp56a - tmp56b;
r0 += w * 4; // next input row (pack4: 4 __fp16 per pixel)
}
// Second pass: transform columns of tmp and scatter the 64 results
// into bottom_blob_tm; frequency rows are `tiles * 4` __fp16 apart.
__fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 4;
__fp16* r0_tm_1 = r0_tm_0 + tiles * 4;
__fp16* r0_tm_2 = r0_tm_0 + tiles * 8;
__fp16* r0_tm_3 = r0_tm_0 + tiles * 12;
__fp16* r0_tm_4 = r0_tm_0 + tiles * 16;
__fp16* r0_tm_5 = r0_tm_0 + tiles * 20;
__fp16* r0_tm_6 = r0_tm_0 + tiles * 24;
__fp16* r0_tm_7 = r0_tm_0 + tiles * 28;
for (int m = 0; m < 8; m++)
{
float16x4_t _tmp00 = vld1_f16(tmp[m][0]);
float16x4_t _tmp01 = vld1_f16(tmp[m][1]);
float16x4_t _tmp02 = vld1_f16(tmp[m][2]);
float16x4_t _tmp03 = vld1_f16(tmp[m][3]);
float16x4_t _tmp04 = vld1_f16(tmp[m][4]);
float16x4_t _tmp05 = vld1_f16(tmp[m][5]);
float16x4_t _tmp06 = vld1_f16(tmp[m][6]);
float16x4_t _tmp07 = vld1_f16(tmp[m][7]);
float16x4_t _r0tm0 = vfma_n_f16(vsub_f16(_tmp00, _tmp06), vsub_f16(_tmp04, _tmp02), 5.25f);
float16x4_t _r0tm7 = vfma_n_f16(vsub_f16(_tmp07, _tmp01), vsub_f16(_tmp03, _tmp05), 5.25f);
// r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
// r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_tmp02, _tmp06), _tmp04, 4.25f);
float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_tmp01, _tmp05), _tmp03, 4.25f);
// float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
// float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);
float16x4_t _r0tm1 = vadd_f16(_tmp12a, _tmp12b);
float16x4_t _r0tm2 = vsub_f16(_tmp12a, _tmp12b);
// r0_tm[1] = tmp12a + tmp12b;
// r0_tm[2] = tmp12a - tmp12b;
float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
float16x4_t _tmp34b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
// float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
// float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
float16x4_t _r0tm3 = vadd_f16(_tmp34a, _tmp34b);
float16x4_t _r0tm4 = vsub_f16(_tmp34a, _tmp34b);
// r0_tm[3] = tmp34a + tmp34b;
// r0_tm[4] = tmp34a - tmp34b;
float16x4_t _tmp56a = vfma_n_f16(_tmp06, vfms_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
float16x4_t _tmp56b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
// float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
// float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
float16x4_t _r0tm5 = vadd_f16(_tmp56a, _tmp56b);
float16x4_t _r0tm6 = vsub_f16(_tmp56a, _tmp56b);
// r0_tm[5] = tmp56a + tmp56b;
// r0_tm[6] = tmp56a - tmp56b;
vst1_f16(r0_tm_0, _r0tm0);
vst1_f16(r0_tm_1, _r0tm1);
vst1_f16(r0_tm_2, _r0tm2);
vst1_f16(r0_tm_3, _r0tm3);
vst1_f16(r0_tm_4, _r0tm4);
vst1_f16(r0_tm_5, _r0tm5);
vst1_f16(r0_tm_6, _r0tm6);
vst1_f16(r0_tm_7, _r0tm7);
// Advance all 8 pointers by one group of 8 frequency rows.
r0_tm_0 += tiles * 32;
r0_tm_1 += tiles * 32;
r0_tm_2 += tiles * 32;
r0_tm_3 += tiles * 32;
r0_tm_4 += tiles * 32;
r0_tm_5 += tiles * 32;
r0_tm_6 += tiles * 32;
r0_tm_7 += tiles * 32;
}
}
}
}
}
bottom_blob_bordered = Mat(); // padded input no longer needed; release early
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// Repack bottom_blob_tm so that, per frequency index r (0..63), tiles are
// grouped in batches of 8 / 4 / 1 with all input channels contiguous —
// the layout the GEMM-like asm kernels below stream through linearly.
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 4; // jump to the same (r, i) in the next input channel
}
}
for (; i + 3 < tiles; i += 4)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x4
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n"
"st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i < tiles; i++)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// single tile: plain pack4 copy, no transpose needed
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
r0 += bottom_blob_tm.cstep * 4;
}
}
}
bottom_blob_tm = Mat(); // repacked copy is now authoritative
// permute end
top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator);
// Process output channels in pairs (each kernel_tm channel packs 2 outch),
// then mop up an odd trailing channel.
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
__fp16* output0_tm = top_blob_tm.channel(p);
__fp16* output1_tm = top_blob_tm.channel(p + 1);
const Mat kernel01_tm = kernel_tm.channel(pp);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
// 8-tile kernel: v24..v31 each accumulate 8 fp16 lanes =
// pack4 of output channel p (low half) and p+1 (high half) for one tile.
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123
"fmla v24.8h, v4.8h, v0.h[0] \n"
"fmla v25.8h, v4.8h, v0.h[1] \n"
"fmla v26.8h, v4.8h, v0.h[2] \n"
"fmla v27.8h, v4.8h, v0.h[3] \n"
"fmla v28.8h, v4.8h, v0.h[4] \n"
"fmla v29.8h, v4.8h, v0.h[5] \n"
"fmla v30.8h, v4.8h, v0.h[6] \n"
"fmla v31.8h, v4.8h, v0.h[7] \n"
"fmla v24.8h, v5.8h, v1.h[0] \n"
"fmla v25.8h, v5.8h, v1.h[1] \n"
"fmla v26.8h, v5.8h, v1.h[2] \n"
"fmla v27.8h, v5.8h, v1.h[3] \n"
"fmla v28.8h, v5.8h, v1.h[4] \n"
"fmla v29.8h, v5.8h, v1.h[5] \n"
"fmla v30.8h, v5.8h, v1.h[6] \n"
"fmla v31.8h, v5.8h, v1.h[7] \n"
"fmla v24.8h, v6.8h, v2.h[0] \n"
"fmla v25.8h, v6.8h, v2.h[1] \n"
"fmla v26.8h, v6.8h, v2.h[2] \n"
"fmla v27.8h, v6.8h, v2.h[3] \n"
"fmla v28.8h, v6.8h, v2.h[4] \n"
"fmla v29.8h, v6.8h, v2.h[5] \n"
"fmla v30.8h, v6.8h, v2.h[6] \n"
"fmla v31.8h, v6.8h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v7.8h, v3.h[0] \n"
"fmla v25.8h, v7.8h, v3.h[1] \n"
"fmla v26.8h, v7.8h, v3.h[2] \n"
"fmla v27.8h, v7.8h, v3.h[3] \n"
"fmla v28.8h, v7.8h, v3.h[4] \n"
"fmla v29.8h, v7.8h, v3.h[5] \n"
"fmla v30.8h, v7.8h, v3.h[6] \n"
"fmla v31.8h, v7.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"ext v28.16b, v28.16b, v28.16b, #8 \n"
"ext v29.16b, v29.16b, v29.16b, #8 \n"
"ext v30.16b, v30.16b, v30.16b, #8 \n"
"ext v31.16b, v31.16b, v31.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
// 4-tile kernel: same scheme with v24..v27 only.
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123
"fmla v24.8h, v4.8h, v0.h[0] \n"
"fmla v25.8h, v4.8h, v0.h[1] \n"
"fmla v26.8h, v4.8h, v0.h[2] \n"
"fmla v27.8h, v4.8h, v0.h[3] \n"
"fmla v24.8h, v5.8h, v1.h[0] \n"
"fmla v25.8h, v5.8h, v1.h[1] \n"
"fmla v26.8h, v5.8h, v1.h[2] \n"
"fmla v27.8h, v5.8h, v1.h[3] \n"
"fmla v24.8h, v6.8h, v2.h[0] \n"
"fmla v25.8h, v6.8h, v2.h[1] \n"
"fmla v26.8h, v6.8h, v2.h[2] \n"
"fmla v27.8h, v6.8h, v2.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v7.8h, v3.h[0] \n"
"fmla v25.8h, v7.8h, v3.h[1] \n"
"fmla v26.8h, v7.8h, v3.h[2] \n"
"fmla v27.8h, v7.8h, v3.h[3] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27");
}
// 1-tile remainder with intrinsics; _sum0 low half -> outch p, high half -> p+1.
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
float16x8_t _sum0 = vdupq_n_f16(0.f);
for (int q = 0; q < inch; q++)
{
float16x4_t _r0 = vld1_f16(r0);
float16x8_t _k0 = vld1q_f16(kptr);
float16x8_t _k1 = vld1q_f16(kptr + 8);
float16x8_t _k2 = vld1q_f16(kptr + 16);
float16x8_t _k3 = vld1q_f16(kptr + 24);
_sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0);
_sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1);
_sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2);
_sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3);
kptr += 32;
r0 += 4;
}
vst1_f16(output0_tm, vget_low_f16(_sum0));
vst1_f16(output1_tm, vget_high_f16(_sum0));
output0_tm += 4;
output1_tm += 4;
}
}
}
// Odd trailing output channel: single-channel variant of the kernels above
// (4h accumulators, kernel read 32 bytes per input-channel step).
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
__fp16* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123
"fmla v24.4h, v4.4h, v0.h[0] \n"
"fmla v25.4h, v4.4h, v0.h[1] \n"
"fmla v26.4h, v4.4h, v0.h[2] \n"
"fmla v27.4h, v4.4h, v0.h[3] \n"
"fmla v28.4h, v4.4h, v0.h[4] \n"
"fmla v29.4h, v4.4h, v0.h[5] \n"
"fmla v30.4h, v4.4h, v0.h[6] \n"
"fmla v31.4h, v4.4h, v0.h[7] \n"
"fmla v24.4h, v5.4h, v1.h[0] \n"
"fmla v25.4h, v5.4h, v1.h[1] \n"
"fmla v26.4h, v5.4h, v1.h[2] \n"
"fmla v27.4h, v5.4h, v1.h[3] \n"
"fmla v28.4h, v5.4h, v1.h[4] \n"
"fmla v29.4h, v5.4h, v1.h[5] \n"
"fmla v30.4h, v5.4h, v1.h[6] \n"
"fmla v31.4h, v5.4h, v1.h[7] \n"
"fmla v24.4h, v6.4h, v2.h[0] \n"
"fmla v25.4h, v6.4h, v2.h[1] \n"
"fmla v26.4h, v6.4h, v2.h[2] \n"
"fmla v27.4h, v6.4h, v2.h[3] \n"
"fmla v28.4h, v6.4h, v2.h[4] \n"
"fmla v29.4h, v6.4h, v2.h[5] \n"
"fmla v30.4h, v6.4h, v2.h[6] \n"
"fmla v31.4h, v6.4h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v7.4h, v3.h[0] \n"
"fmla v25.4h, v7.4h, v3.h[1] \n"
"fmla v26.4h, v7.4h, v3.h[2] \n"
"fmla v27.4h, v7.4h, v3.h[3] \n"
"fmla v28.4h, v7.4h, v3.h[4] \n"
"fmla v29.4h, v7.4h, v3.h[5] \n"
"fmla v30.4h, v7.4h, v3.h[6] \n"
"fmla v31.4h, v7.4h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123
"fmla v24.4h, v4.4h, v0.h[0] \n"
"fmla v25.4h, v4.4h, v0.h[1] \n"
"fmla v26.4h, v4.4h, v0.h[2] \n"
"fmla v27.4h, v4.4h, v0.h[3] \n"
"fmla v24.4h, v5.4h, v1.h[0] \n"
"fmla v25.4h, v5.4h, v1.h[1] \n"
"fmla v26.4h, v5.4h, v1.h[2] \n"
"fmla v27.4h, v5.4h, v1.h[3] \n"
"fmla v24.4h, v6.4h, v2.h[0] \n"
"fmla v25.4h, v6.4h, v2.h[1] \n"
"fmla v26.4h, v6.4h, v2.h[2] \n"
"fmla v27.4h, v6.4h, v2.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v7.4h, v3.h[0] \n"
"fmla v25.4h, v7.4h, v3.h[1] \n"
"fmla v26.4h, v7.4h, v3.h[2] \n"
"fmla v27.4h, v7.4h, v3.h[3] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27");
}
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
float16x4_t _sum0 = vdup_n_f16(0.f);
for (int q = 0; q < inch; q++)
{
float16x4_t _r0 = vld1_f16(r0);
float16x4_t _k0 = vld1_f16(kptr);
float16x4_t _k1 = vld1_f16(kptr + 4);
float16x4_t _k2 = vld1_f16(kptr + 8);
float16x4_t _k3 = vld1_f16(kptr + 12);
_sum0 = vfma_lane_f16(_sum0, _k0, _r0, 0);
_sum0 = vfma_lane_f16(_sum0, _k1, _r0, 1);
_sum0 = vfma_lane_f16(_sum0, _k2, _r0, 2);
_sum0 = vfma_lane_f16(_sum0, _k3, _r0, 3);
kptr += 16;
r0 += 4;
}
vst1_f16(output0_tm, _sum0);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat(); // already released after permute; harmless repeat
// END dot
// BEGIN transform output
// Write into top_blob directly when the rounded-up size matches, otherwise
// into a bordered scratch that is cut at the end.
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
// Bias is read as 4 consecutive __fp16 per pack4 output channel.
float16x4_t _bias0 = bias ? vld1_f16((const __fp16*)bias + p * 4) : vdup_n_f16(0.f);
// Scratch for the separable inverse transform: 8x8 frequency -> 6x8 -> 6x6.
__fp16 tmp[6][8][4];
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 4;
const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4;
const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8;
const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12;
const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16;
const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20;
const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24;
const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28;
__fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4;
// TODO neon optimize
// First pass over the 8 frequency columns.
for (int m = 0; m < 8; m++)
{
float16x4_t _out0tm0 = vld1_f16(output0_tm_0);
float16x4_t _out0tm1 = vld1_f16(output0_tm_1);
float16x4_t _out0tm2 = vld1_f16(output0_tm_2);
float16x4_t _out0tm3 = vld1_f16(output0_tm_3);
float16x4_t _out0tm4 = vld1_f16(output0_tm_4);
float16x4_t _out0tm5 = vld1_f16(output0_tm_5);
float16x4_t _out0tm6 = vld1_f16(output0_tm_6);
float16x4_t _out0tm7 = vld1_f16(output0_tm_7);
float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2);
float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2);
// float tmp024a = output0_tm[1] + output0_tm[2];
// float tmp135a = output0_tm[1] - output0_tm[2];
float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4);
float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4);
// float tmp024b = output0_tm[3] + output0_tm[4];
// float tmp135b = output0_tm[3] - output0_tm[4];
float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6);
float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6);
// float tmp024c = output0_tm[5] + output0_tm[6];
// float tmp135c = output0_tm[5] - output0_tm[6];
float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f));
float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
vst1_f16(tmp[0][m], _tmp0m);
vst1_f16(tmp[2][m], _tmp2m);
vst1_f16(tmp[4][m], _tmp4m);
// tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
// tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
// tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f));
vst1_f16(tmp[1][m], _tmp1m);
vst1_f16(tmp[3][m], _tmp3m);
vst1_f16(tmp[5][m], _tmp5m);
// tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
// tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
// tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 32;
output0_tm_1 += tiles * 32;
output0_tm_2 += tiles * 32;
output0_tm_3 += tiles * 32;
output0_tm_4 += tiles * 32;
output0_tm_5 += tiles * 32;
output0_tm_6 += tiles * 32;
output0_tm_7 += tiles * 32;
}
// Second pass over the 6 spatial rows; bias added here, once per output.
for (int m = 0; m < 6; m++)
{
float16x4_t _tmp00 = vld1_f16(tmp[m][0]);
float16x4_t _tmp01 = vld1_f16(tmp[m][1]);
float16x4_t _tmp02 = vld1_f16(tmp[m][2]);
float16x4_t _tmp03 = vld1_f16(tmp[m][3]);
float16x4_t _tmp04 = vld1_f16(tmp[m][4]);
float16x4_t _tmp05 = vld1_f16(tmp[m][5]);
float16x4_t _tmp06 = vld1_f16(tmp[m][6]);
float16x4_t _tmp07 = vld1_f16(tmp[m][7]);
float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02);
float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02);
// float tmp024a = tmp0[1] + tmp0[2];
// float tmp135a = tmp0[1] - tmp0[2];
float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04);
float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04);
// float tmp024b = tmp0[3] + tmp0[4];
// float tmp135b = tmp0[3] - tmp0[4];
float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06);
float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06);
// float tmp024c = tmp0[5] + tmp0[6];
// float tmp135c = tmp0[5] - tmp0[6];
float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)));
float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
vst1_f16(output0, _out00);
vst1_f16(output0 + 8, _out02);
vst1_f16(output0 + 16, _out04);
// output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
// output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
// output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)));
vst1_f16(output0 + 4, _out01);
vst1_f16(output0 + 12, _out03);
vst1_f16(output0 + 20, _out05);
// output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
// output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
// output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw * 4; // next output row in the bordered blob (pack4)
}
}
}
}
}
// END transform output
// cut result pad
// Trim the 6-multiple rounding back to the caller's requested output size
// (no-op when top_blob_bordered aliases top_blob).
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// conv3x3s1 (3x3 kernel, stride 1) for pack-4 fp16 storage with fp16
// arithmetic (armv8.2-a fp16), aarch64 NEON inline assembly.
// - bottom_blob: input feature maps, elempack 4, __fp16
// - top_blob:    output feature maps, elempack 4, __fp16, pre-sized to outw x outh x outch
// - kernel:      weights repacked so each (outch, inch) pair holds 16*9 halfs
//                (9 taps, each a 4x4 fp16 block) -- see the vld1q_f16 loads below
// - _bias:       optional bias, 4 halfs per output channel (may be empty)
// NOTE(review): outptr0 is taken once per input channel and walked across the
// whole output plane, and r0/r1/r2 advance 8 halfs (= 2 pixels) at the end of
// each output row; this assumes rows are contiguous and the input plane is
// (outw+2) x (outh+2) -- confirm padding against the caller.
static void conv3x3s1_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
// seed the whole output channel with the bias; each input channel's pass
// below then accumulates its contribution on top of it
float16x4_t _bias0 = bias ? vld1_f16(bias + p * 4) : vdup_n_f16((__fp16)0.f);
out0.fill(_bias0);
int q = 0;
for (; q < inch; q++)
{
__fp16* outptr0 = out0.row<__fp16>(0);
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
// 16 * 9
// load the 9 kernel taps; tap (X,Y) is 16 halfs (4 in-lanes x 4 out-lanes)
// split into two 8-half vectors: _kXY_01 (out-lanes 0/1) and _kXY_23 (out-lanes 2/3)
float16x8_t _k00_01 = vld1q_f16(kptr);
float16x8_t _k00_23 = vld1q_f16(kptr + 8);
float16x8_t _k01_01 = vld1q_f16(kptr + 16);
float16x8_t _k01_23 = vld1q_f16(kptr + 24);
float16x8_t _k02_01 = vld1q_f16(kptr + 32);
float16x8_t _k02_23 = vld1q_f16(kptr + 40);
float16x8_t _k10_01 = vld1q_f16(kptr + 48);
float16x8_t _k10_23 = vld1q_f16(kptr + 56);
float16x8_t _k11_01 = vld1q_f16(kptr + 64);
float16x8_t _k11_23 = vld1q_f16(kptr + 72);
float16x8_t _k12_01 = vld1q_f16(kptr + 80);
float16x8_t _k12_23 = vld1q_f16(kptr + 88);
float16x8_t _k20_01 = vld1q_f16(kptr + 96);
float16x8_t _k20_23 = vld1q_f16(kptr + 104);
float16x8_t _k21_01 = vld1q_f16(kptr + 112);
float16x8_t _k21_23 = vld1q_f16(kptr + 120);
float16x8_t _k22_01 = vld1q_f16(kptr + 128);
float16x8_t _k22_23 = vld1q_f16(kptr + 136);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// main loop: 4 output pixels per iteration
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0] \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02 r03 r04 r05
"ext v6.16b, %8.16b, %8.16b, #8 \n"
"fmla v10.4h, %8.4h, v0.h[0] \n"
"fmla v11.4h, %8.4h, v0.h[4] \n"
"fmla v12.4h, %8.4h, v1.h[0] \n"
"fmla v13.4h, %8.4h, v1.h[4] \n"
"fmla v10.4h, v6.4h, v0.h[1] \n"
"fmla v11.4h, v6.4h, v0.h[5] \n"
"fmla v12.4h, v6.4h, v1.h[1] \n"
"fmla v13.4h, v6.4h, v1.h[5] \n"
"ext v7.16b, %9.16b, %9.16b, #8 \n"
"fmla v10.4h, %9.4h, v0.h[2] \n"
"fmla v11.4h, %9.4h, v0.h[6] \n"
"fmla v12.4h, %9.4h, v1.h[2] \n"
"fmla v13.4h, %9.4h, v1.h[6] \n"
"fmla v10.4h, v7.4h, v0.h[3] \n"
"fmla v11.4h, v7.4h, v0.h[7] \n"
"fmla v12.4h, v7.4h, v1.h[3] \n"
"fmla v13.4h, v7.4h, v1.h[7] \n"
"ext v8.16b, %10.16b, %10.16b, #8 \n"
"fmla v10.4h, %10.4h, v0.h[4] \n"
"fmla v11.4h, %10.4h, v1.h[0] \n"
"fmla v12.4h, %10.4h, v1.h[4] \n"
"fmla v13.4h, %10.4h, v2.h[0] \n"
"fmla v10.4h, v8.4h, v0.h[5] \n"
"fmla v11.4h, v8.4h, v1.h[1] \n"
"fmla v12.4h, v8.4h, v1.h[5] \n"
"fmla v13.4h, v8.4h, v2.h[1] \n"
"ext v9.16b, %11.16b, %11.16b, #8 \n"
"fmla v10.4h, %11.4h, v0.h[6] \n"
"fmla v11.4h, %11.4h, v1.h[2] \n"
"fmla v12.4h, %11.4h, v1.h[6] \n"
"fmla v13.4h, %11.4h, v2.h[2] \n"
"fmla v10.4h, v9.4h, v0.h[7] \n"
"fmla v11.4h, v9.4h, v1.h[3] \n"
"fmla v12.4h, v9.4h, v1.h[7] \n"
"fmla v13.4h, v9.4h, v2.h[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12 r13 r14 r15
"ext v6.16b, %12.16b, %12.16b, #8 \n"
"fmla v10.4h, %12.4h, v1.h[0] \n"
"fmla v11.4h, %12.4h, v1.h[4] \n"
"fmla v12.4h, %12.4h, v2.h[0] \n"
"fmla v13.4h, %12.4h, v2.h[4] \n"
"fmla v10.4h, v6.4h, v1.h[1] \n"
"fmla v11.4h, v6.4h, v1.h[5] \n"
"fmla v12.4h, v6.4h, v2.h[1] \n"
"fmla v13.4h, v6.4h, v2.h[5] \n"
"ext v7.16b, %13.16b, %13.16b, #8 \n"
"fmla v10.4h, %13.4h, v1.h[2] \n"
"fmla v11.4h, %13.4h, v1.h[6] \n"
"fmla v12.4h, %13.4h, v2.h[2] \n"
"fmla v13.4h, %13.4h, v2.h[6] \n"
"fmla v10.4h, v7.4h, v1.h[3] \n"
"fmla v11.4h, v7.4h, v1.h[7] \n"
"fmla v12.4h, v7.4h, v2.h[3] \n"
"fmla v13.4h, v7.4h, v2.h[7] \n"
"ext v8.16b, %14.16b, %14.16b, #8 \n"
"fmla v10.4h, %14.4h, v3.h[0] \n"
"fmla v11.4h, %14.4h, v3.h[4] \n"
"fmla v12.4h, %14.4h, v4.h[0] \n"
"fmla v13.4h, %14.4h, v4.h[4] \n"
"fmla v10.4h, v8.4h, v3.h[1] \n"
"fmla v11.4h, v8.4h, v3.h[5] \n"
"fmla v12.4h, v8.4h, v4.h[1] \n"
"fmla v13.4h, v8.4h, v4.h[5] \n"
"ext v9.16b, %15.16b, %15.16b, #8 \n"
"fmla v10.4h, %15.4h, v3.h[2] \n"
"fmla v11.4h, %15.4h, v3.h[6] \n"
"fmla v12.4h, %15.4h, v4.h[2] \n"
"fmla v13.4h, %15.4h, v4.h[6] \n"
"fmla v10.4h, v9.4h, v3.h[3] \n"
"fmla v11.4h, v9.4h, v3.h[7] \n"
"fmla v12.4h, v9.4h, v4.h[3] \n"
"fmla v13.4h, v9.4h, v4.h[7] \n"
"ext v6.16b, %16.16b, %16.16b, #8 \n"
"fmla v10.4h, %16.4h, v3.h[4] \n"
"fmla v11.4h, %16.4h, v4.h[0] \n"
"fmla v12.4h, %16.4h, v4.h[4] \n"
"fmla v13.4h, %16.4h, v5.h[0] \n"
"fmla v10.4h, v6.4h, v3.h[5] \n"
"fmla v11.4h, v6.4h, v4.h[1] \n"
"fmla v12.4h, v6.4h, v4.h[5] \n"
"fmla v13.4h, v6.4h, v5.h[1] \n"
"ext v7.16b, %17.16b, %17.16b, #8 \n"
"fmla v10.4h, %17.4h, v3.h[6] \n"
"fmla v11.4h, %17.4h, v4.h[2] \n"
"fmla v12.4h, %17.4h, v4.h[6] \n"
"fmla v13.4h, %17.4h, v5.h[2] \n"
"fmla v10.4h, v7.4h, v3.h[7] \n"
"fmla v11.4h, v7.4h, v4.h[3] \n"
"fmla v12.4h, v7.4h, v4.h[7] \n"
"fmla v13.4h, v7.4h, v5.h[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22 r23 r24 r25
"ext v8.16b, %18.16b, %18.16b, #8 \n"
"fmla v10.4h, %18.4h, v4.h[0] \n"
"fmla v11.4h, %18.4h, v4.h[4] \n"
"fmla v12.4h, %18.4h, v5.h[0] \n"
"fmla v13.4h, %18.4h, v5.h[4] \n"
"fmla v10.4h, v8.4h, v4.h[1] \n"
"fmla v11.4h, v8.4h, v4.h[5] \n"
"fmla v12.4h, v8.4h, v5.h[1] \n"
"fmla v13.4h, v8.4h, v5.h[5] \n"
"ext v9.16b, %19.16b, %19.16b, #8 \n"
"fmla v10.4h, %19.4h, v4.h[2] \n"
"fmla v11.4h, %19.4h, v4.h[6] \n"
"fmla v12.4h, %19.4h, v5.h[2] \n"
"fmla v13.4h, %19.4h, v5.h[6] \n"
"fmla v10.4h, v9.4h, v4.h[3] \n"
"fmla v11.4h, v9.4h, v4.h[7] \n"
"fmla v12.4h, v9.4h, v5.h[3] \n"
"fmla v13.4h, v9.4h, v5.h[7] \n"
"ext v6.16b, %20.16b, %20.16b, #8 \n"
"fmla v10.4h, %20.4h, v0.h[0] \n"
"fmla v11.4h, %20.4h, v0.h[4] \n"
"fmla v12.4h, %20.4h, v1.h[0] \n"
"fmla v13.4h, %20.4h, v1.h[4] \n"
"fmla v10.4h, v6.4h, v0.h[1] \n"
"fmla v11.4h, v6.4h, v0.h[5] \n"
"fmla v12.4h, v6.4h, v1.h[1] \n"
"fmla v13.4h, v6.4h, v1.h[5] \n"
"ext v7.16b, %21.16b, %21.16b, #8 \n"
"fmla v10.4h, %21.4h, v0.h[2] \n"
"fmla v11.4h, %21.4h, v0.h[6] \n"
"fmla v12.4h, %21.4h, v1.h[2] \n"
"fmla v13.4h, %21.4h, v1.h[6] \n"
"fmla v10.4h, v7.4h, v0.h[3] \n"
"fmla v11.4h, v7.4h, v0.h[7] \n"
"fmla v12.4h, v7.4h, v1.h[3] \n"
"fmla v13.4h, v7.4h, v1.h[7] \n"
"ext v8.16b, %22.16b, %22.16b, #8 \n"
"fmla v10.4h, %22.4h, v0.h[4] \n"
"fmla v11.4h, %22.4h, v1.h[0] \n"
"fmla v12.4h, %22.4h, v1.h[4] \n"
"fmla v13.4h, %22.4h, v2.h[0] \n"
"fmla v10.4h, v8.4h, v0.h[5] \n"
"fmla v11.4h, v8.4h, v1.h[1] \n"
"fmla v12.4h, v8.4h, v1.h[5] \n"
"fmla v13.4h, v8.4h, v2.h[1] \n"
"ext v9.16b, %23.16b, %23.16b, #8 \n"
"fmla v10.4h, %23.4h, v0.h[6] \n"
"fmla v11.4h, %23.4h, v1.h[2] \n"
"fmla v12.4h, %23.4h, v1.h[6] \n"
"fmla v13.4h, %23.4h, v2.h[2] \n"
"fmla v10.4h, v9.4h, v0.h[7] \n"
"fmla v11.4h, v9.4h, v1.h[3] \n"
"fmla v12.4h, v9.4h, v1.h[7] \n"
"fmla v13.4h, v9.4h, v2.h[3] \n"
"ext v6.16b, %24.16b, %24.16b, #8 \n"
"fmla v10.4h, %24.4h, v1.h[0] \n"
"fmla v11.4h, %24.4h, v1.h[4] \n"
"fmla v12.4h, %24.4h, v2.h[0] \n"
"fmla v13.4h, %24.4h, v2.h[4] \n"
"add %1, %1, #32 \n"
"fmla v10.4h, v6.4h, v1.h[1] \n"
"fmla v11.4h, v6.4h, v1.h[5] \n"
"fmla v12.4h, v6.4h, v2.h[1] \n"
"fmla v13.4h, v6.4h, v2.h[5] \n"
"ext v7.16b, %25.16b, %25.16b, #8 \n"
"fmla v10.4h, %25.4h, v1.h[2] \n"
"fmla v11.4h, %25.4h, v1.h[6] \n"
"fmla v12.4h, %25.4h, v2.h[2] \n"
"fmla v13.4h, %25.4h, v2.h[6] \n"
"add %2, %2, #32 \n"
"fmla v10.4h, v7.4h, v1.h[3] \n"
"fmla v11.4h, v7.4h, v1.h[7] \n"
"fmla v12.4h, v7.4h, v2.h[3] \n"
"fmla v13.4h, v7.4h, v2.h[7] \n"
"add %3, %3, #32 \n"
"st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_01), // %8
"w"(_k00_23), // %9
"w"(_k01_01), // %10
"w"(_k01_23), // %11
"w"(_k02_01), // %12
"w"(_k02_23), // %13
"w"(_k10_01), // %14
"w"(_k10_23), // %15
"w"(_k11_01), // %16
"w"(_k11_23), // %17
"w"(_k12_01), // %18
"w"(_k12_23), // %19
"w"(_k20_01), // %20
"w"(_k20_23), // %21
"w"(_k21_01), // %22
"w"(_k21_23), // %23
"w"(_k22_01), // %24
"w"(_k22_23) // %25
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
// tail: 2 output pixels (two partial accumulator pairs folded by the fadds at the end)
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.8h, v1.8h}, [%1] \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v12.4h, v13.4h}, [%0] \n" // sum0 sum1
"ext v4.16b, %8.16b, %8.16b, #8 \n"
"fmul v10.4h, %8.4h, v0.h[0] \n"
"fmul v11.4h, %8.4h, v0.h[4] \n"
"fmla v12.4h, v4.4h, v0.h[1] \n"
"fmla v13.4h, v4.4h, v0.h[5] \n"
"ext v5.16b, %9.16b, %9.16b, #8 \n"
"fmla v10.4h, %9.4h, v0.h[2] \n"
"fmla v11.4h, %9.4h, v0.h[6] \n"
"fmla v12.4h, v5.4h, v0.h[3] \n"
"fmla v13.4h, v5.4h, v0.h[7] \n"
"ext v6.16b, %10.16b, %10.16b, #8 \n"
"fmla v10.4h, %10.4h, v0.h[4] \n"
"fmla v11.4h, %10.4h, v1.h[0] \n"
"fmla v12.4h, v6.4h, v0.h[5] \n"
"fmla v13.4h, v6.4h, v1.h[1] \n"
"ext v7.16b, %11.16b, %11.16b, #8 \n"
"fmla v10.4h, %11.4h, v0.h[6] \n"
"fmla v11.4h, %11.4h, v1.h[2] \n"
"fmla v12.4h, v7.4h, v0.h[7] \n"
"fmla v13.4h, v7.4h, v1.h[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v2.8h, v3.8h}, [%2] \n" // r10 r11 r12 r13
"ext v8.16b, %12.16b, %12.16b, #8 \n"
"fmla v10.4h, %12.4h, v1.h[0] \n"
"fmla v11.4h, %12.4h, v1.h[4] \n"
"fmla v12.4h, v8.4h, v1.h[1] \n"
"fmla v13.4h, v8.4h, v1.h[5] \n"
"ext v9.16b, %13.16b, %13.16b, #8 \n"
"fmla v10.4h, %13.4h, v1.h[2] \n"
"fmla v11.4h, %13.4h, v1.h[6] \n"
"fmla v12.4h, v9.4h, v1.h[3] \n"
"fmla v13.4h, v9.4h, v1.h[7] \n"
"ext v4.16b, %14.16b, %14.16b, #8 \n"
"fmla v10.4h, %14.4h, v2.h[0] \n"
"fmla v11.4h, %14.4h, v2.h[4] \n"
"fmla v12.4h, v4.4h, v2.h[1] \n"
"fmla v13.4h, v4.4h, v2.h[5] \n"
"ext v5.16b, %15.16b, %15.16b, #8 \n"
"fmla v10.4h, %15.4h, v2.h[2] \n"
"fmla v11.4h, %15.4h, v2.h[6] \n"
"fmla v12.4h, v5.4h, v2.h[3] \n"
"fmla v13.4h, v5.4h, v2.h[7] \n"
"ext v6.16b, %16.16b, %16.16b, #8 \n"
"fmla v10.4h, %16.4h, v2.h[4] \n"
"fmla v11.4h, %16.4h, v3.h[0] \n"
"fmla v12.4h, v6.4h, v2.h[5] \n"
"fmla v13.4h, v6.4h, v3.h[1] \n"
"ext v7.16b, %17.16b, %17.16b, #8 \n"
"fmla v10.4h, %17.4h, v2.h[6] \n"
"fmla v11.4h, %17.4h, v3.h[2] \n"
"fmla v12.4h, v7.4h, v2.h[7] \n"
"fmla v13.4h, v7.4h, v3.h[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.8h, v1.8h}, [%3] \n" // r20 r21 r22 r23
"ext v8.16b, %18.16b, %18.16b, #8 \n"
"fmla v10.4h, %18.4h, v3.h[0] \n"
"fmla v11.4h, %18.4h, v3.h[4] \n"
"fmla v12.4h, v8.4h, v3.h[1] \n"
"fmla v13.4h, v8.4h, v3.h[5] \n"
"ext v9.16b, %19.16b, %19.16b, #8 \n"
"fmla v10.4h, %19.4h, v3.h[2] \n"
"fmla v11.4h, %19.4h, v3.h[6] \n"
"fmla v12.4h, v9.4h, v3.h[3] \n"
"fmla v13.4h, v9.4h, v3.h[7] \n"
"ext v4.16b, %20.16b, %20.16b, #8 \n"
"fmla v10.4h, %20.4h, v0.h[0] \n"
"fmla v11.4h, %20.4h, v0.h[4] \n"
"fmla v12.4h, v4.4h, v0.h[1] \n"
"fmla v13.4h, v4.4h, v0.h[5] \n"
"ext v5.16b, %21.16b, %21.16b, #8 \n"
"fmla v10.4h, %21.4h, v0.h[2] \n"
"fmla v11.4h, %21.4h, v0.h[6] \n"
"fmla v12.4h, v5.4h, v0.h[3] \n"
"fmla v13.4h, v5.4h, v0.h[7] \n"
"ext v6.16b, %22.16b, %22.16b, #8 \n"
"fmla v10.4h, %22.4h, v0.h[4] \n"
"fmla v11.4h, %22.4h, v1.h[0] \n"
"fmla v12.4h, v6.4h, v0.h[5] \n"
"fmla v13.4h, v6.4h, v1.h[1] \n"
"ext v7.16b, %23.16b, %23.16b, #8 \n"
"fmla v10.4h, %23.4h, v0.h[6] \n"
"fmla v11.4h, %23.4h, v1.h[2] \n"
"fmla v12.4h, v7.4h, v0.h[7] \n"
"fmla v13.4h, v7.4h, v1.h[3] \n"
"ext v8.16b, %24.16b, %24.16b, #8 \n"
"fmla v10.4h, %24.4h, v1.h[0] \n"
"fmla v11.4h, %24.4h, v1.h[4] \n"
"fmla v12.4h, v8.4h, v1.h[1] \n"
"fmla v13.4h, v8.4h, v1.h[5] \n"
"ext v9.16b, %25.16b, %25.16b, #8 \n"
"fmla v10.4h, %25.4h, v1.h[2] \n"
"fmla v11.4h, %25.4h, v1.h[6] \n"
"fmla v12.4h, v9.4h, v1.h[3] \n"
"fmla v13.4h, v9.4h, v1.h[7] \n"
"add %1, %1, #16 \n"
"fadd v10.4h, v10.4h, v12.4h \n"
"add %2, %2, #16 \n"
"fadd v11.4h, v11.4h, v13.4h \n"
"add %3, %3, #16 \n"
"st1 {v10.4h, v11.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_01), // %8
"w"(_k00_23), // %9
"w"(_k01_01), // %10
"w"(_k01_23), // %11
"w"(_k02_01), // %12
"w"(_k02_23), // %13
"w"(_k10_01), // %14
"w"(_k10_23), // %15
"w"(_k11_01), // %16
"w"(_k11_23), // %17
"w"(_k12_01), // %18
"w"(_k12_23), // %19
"w"(_k20_01), // %20
"w"(_k20_23), // %21
"w"(_k21_01), // %22
"w"(_k21_23), // %23
"w"(_k22_01), // %24
"w"(_k22_23) // %25
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
// tail: 1 output pixel (four partial accumulators v10..v13 folded by the fadds at the end)
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v0.4h, v1.4h, v2.4h}, [%1] \n" // r00 r01 r02
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v13.4h}, [%0] \n" // sum0
"ext v6.16b, %8.16b, %8.16b, #8 \n"
"fmul v10.4h, %8.4h, v0.h[0] \n"
"fmul v11.4h, v6.4h, v0.h[1] \n"
"ext v7.16b, %9.16b, %9.16b, #8 \n"
"fmul v12.4h, %9.4h, v0.h[2] \n"
"fmla v13.4h, v7.4h, v0.h[3] \n"
"ext v8.16b, %10.16b, %10.16b, #8 \n"
"fmla v10.4h, %10.4h, v1.h[0] \n"
"fmla v11.4h, v8.4h, v1.h[1] \n"
"ext v9.16b, %11.16b, %11.16b, #8 \n"
"fmla v12.4h, %11.4h, v1.h[2] \n"
"fmla v13.4h, v9.4h, v1.h[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v3.4h, v4.4h, v5.4h}, [%2] \n" // r10 r11 r12
"ext v6.16b, %12.16b, %12.16b, #8 \n"
"fmla v10.4h, %12.4h, v2.h[0] \n"
"fmla v11.4h, v6.4h, v2.h[1] \n"
"ext v7.16b, %13.16b, %13.16b, #8 \n"
"fmla v12.4h, %13.4h, v2.h[2] \n"
"fmla v13.4h, v7.4h, v2.h[3] \n"
"ext v8.16b, %14.16b, %14.16b, #8 \n"
"fmla v10.4h, %14.4h, v3.h[0] \n"
"fmla v11.4h, v8.4h, v3.h[1] \n"
"ext v9.16b, %15.16b, %15.16b, #8 \n"
"fmla v12.4h, %15.4h, v3.h[2] \n"
"fmla v13.4h, v9.4h, v3.h[3] \n"
"ext v6.16b, %16.16b, %16.16b, #8 \n"
"fmla v10.4h, %16.4h, v4.h[0] \n"
"fmla v11.4h, v6.4h, v4.h[1] \n"
"ext v7.16b, %17.16b, %17.16b, #8 \n"
"fmla v12.4h, %17.4h, v4.h[2] \n"
"fmla v13.4h, v7.4h, v4.h[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v0.4h, v1.4h, v2.4h}, [%3] \n" // r20 r21 r22
"ext v8.16b, %18.16b, %18.16b, #8 \n"
"fmla v10.4h, %18.4h, v5.h[0] \n"
"fmla v11.4h, v8.4h, v5.h[1] \n"
"ext v9.16b, %19.16b, %19.16b, #8 \n"
"fmla v12.4h, %19.4h, v5.h[2] \n"
"fmla v13.4h, v9.4h, v5.h[3] \n"
"ext v6.16b, %20.16b, %20.16b, #8 \n"
"fmla v10.4h, %20.4h, v0.h[0] \n"
"fmla v11.4h, v6.4h, v0.h[1] \n"
"ext v7.16b, %21.16b, %21.16b, #8 \n"
"fmla v12.4h, %21.4h, v0.h[2] \n"
"fmla v13.4h, v7.4h, v0.h[3] \n"
"ext v8.16b, %22.16b, %22.16b, #8 \n"
"fmla v10.4h, %22.4h, v1.h[0] \n"
"fmla v11.4h, v8.4h, v1.h[1] \n"
"ext v9.16b, %23.16b, %23.16b, #8 \n"
"fmla v12.4h, %23.4h, v1.h[2] \n"
"fmla v13.4h, v9.4h, v1.h[3] \n"
"ext v6.16b, %24.16b, %24.16b, #8 \n"
"fmla v10.4h, %24.4h, v2.h[0] \n"
"fmla v11.4h, v6.4h, v2.h[1] \n"
"ext v7.16b, %25.16b, %25.16b, #8 \n"
"fmla v12.4h, %25.4h, v2.h[2] \n"
"fmla v13.4h, v7.4h, v2.h[3] \n"
"fadd v10.4h, v10.4h, v11.4h \n"
"add %1, %1, #8 \n"
"fadd v12.4h, v12.4h, v13.4h \n"
"add %2, %2, #8 \n"
"fadd v10.4h, v10.4h, v12.4h \n"
"add %3, %3, #8 \n"
"st1 {v10.4h}, [%0], #8 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_01), // %8
"w"(_k00_23), // %9
"w"(_k01_01), // %10
"w"(_k01_23), // %11
"w"(_k02_01), // %12
"w"(_k02_23), // %13
"w"(_k10_01), // %14
"w"(_k10_23), // %15
"w"(_k11_01), // %16
"w"(_k11_23), // %17
"w"(_k12_01), // %18
"w"(_k12_23), // %19
"w"(_k20_01), // %20
"w"(_k20_23), // %21
"w"(_k21_01), // %22
"w"(_k21_23), // %23
"w"(_k22_01), // %24
"w"(_k22_23) // %25
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
// step over the 2 right-edge input pixels (2 pixels * 4 halfs) to reach the next row
r0 += 8;
r1 += 8;
r2 += 8;
}
}
}
}
|
DRB063-outeronly1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized.
*/
int n = 100, m = 100;
double b[100][100];

/* Shift every row of b one position to the left: b[i][j] takes the old
 * value of b[i][j+1].  Rows are independent, so only the outer loop is
 * parallelized; the inner loop carries an anti-dependence between
 * consecutive iterations and must stay sequential. */
void foo()
{
    int row, col;
#pragma omp parallel for private(col) schedule(dynamic)
    for (row = 0; row < n; row++)
    {
        for (col = 0; col < m - 1; col++) /* note the m-1 bound */
        {
            b[row][col] = b[row][col + 1];
        }
    }
}
/* Entry point: run the benchmark kernel once. */
int main(void)
{
    foo();
    return 0;
}
|
fields_values.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#define XSTR(x) #x
#define STR(x) XSTR(x)
#define streqls(s1, s2) (!strcmp(s1, s2))
#define check(condition) \
if (!(condition)) { \
fprintf(stderr, "error: %s: %d: " STR(condition) "\n", __FILE__, \
__LINE__); \
exit(1); \
}
#if defined(_WIN32)
#include <windows.h>
#define getpid _getpid
typedef int pid_t;
#define gettid GetCurrentThreadId
#define my_gethostname(buf, sz) GetComputerNameA(buf, &(sz))
#else
#include <unistd.h>
#include <sys/types.h>
#define my_gethostname(buf, sz) gethostname(buf, sz)
#endif
#define BUFFER_SIZE 256
/* Capture the current affinity-format expansion for this thread and
 * parse it as a single decimal integer.  Aborts (via check) if the
 * buffer would overflow or if the text is not an integer. */
int get_integer() {
  char buf[BUFFER_SIZE];
  size_t needed = omp_capture_affinity(buf, BUFFER_SIZE, NULL);
  check(needed < BUFFER_SIZE);
  int value;
  int parsed = sscanf(buf, "%d", &value);
  check(parsed == 1);
  return value;
}
/* Capture the current affinity-format expansion for this thread and
 * return it as a heap-allocated string.  The caller owns the result
 * and must free() it.  Aborts (via check) on buffer overflow or
 * allocation failure. */
char* get_string() {
  char buf[BUFFER_SIZE];
  size_t needed = omp_capture_affinity(buf, BUFFER_SIZE, NULL);
  check(needed < BUFFER_SIZE);
  // the original declared unused locals (int n, retval) here; removed
  char* copy = strdup(buf);
  check(copy != NULL);  // strdup may return NULL on OOM
  return copy;
}
// For each of the two equivalent format strings, set it as the affinity
// format and verify that the captured value equals func() in an outer
// 8-thread parallel region, inside a nested 3-thread region, and again
// after the nested region ends (the outer expansion must be restored).
void check_integer(const char* formats[2], int(*func)()) {
int i;
for (i = 0; i < 2; ++i) {
omp_set_affinity_format(formats[i]);
#pragma omp parallel num_threads(8)
{
check(get_integer() == func());
#pragma omp parallel num_threads(3)
{
check(get_integer() == func());
}
check(get_integer() == func());
}
}
}
void check_nesting_level() {
  // %{nesting_level} and its short form %L must both expand to omp_get_level().
  const char* fmts[2] = {"%{nesting_level}", "%L"};
  check_integer(fmts, omp_get_level);
}
void check_thread_num() {
  // %{thread_num} and its short form %n must both expand to omp_get_thread_num().
  const char* fmts[2] = {"%{thread_num}", "%n"};
  check_integer(fmts, omp_get_thread_num);
}
void check_num_threads() {
  // %{num_threads} and its short form %N must both expand to omp_get_num_threads().
  const char* fmts[2] = {"%{num_threads}", "%N"};
  check_integer(fmts, omp_get_num_threads);
}
/* Thread number of this thread's parent, i.e. the ancestor one
 * nesting level above the current one. */
int ancestor_helper() {
  int parent_level = omp_get_level() - 1;
  return omp_get_ancestor_thread_num(parent_level);
}
void check_ancestor_tnum() {
  // %{ancestor_tnum} and its short form %a must both expand to the
  // parent thread's thread number.
  const char* fmts[2] = {"%{ancestor_tnum}", "%a"};
  check_integer(fmts, ancestor_helper);
}
// Wrapper giving getpid() the int(void) signature check_integer expects.
int my_get_pid() {
  return (int)getpid();
}
void check_process_id() {
  // %{process_id} and its short form %P must both expand to the PID.
  const char* fmts[2] = {"%{process_id}", "%P"};
  check_integer(fmts, my_get_pid);
}
/*
int my_get_tid() { return (int)gettid(); }
void check_native_thread_id() {
// Check %{native_thread_id} and %i
const char* formats[2] = {"%{native_thread_id}", "%i"};
check_integer(formats, my_get_tid);
}
*/
void check_host() {
  // Verify that %{host} and its short form %H expand to this machine's
  // hostname in every thread of a parallel region.
  int cap = 256;
  char expected[256];
  const char* fmts[2] = {"%{host}", "%H"};
  my_gethostname(expected, cap);
  for (int idx = 0; idx < 2; ++idx) {
    omp_set_affinity_format(fmts[idx]);
#pragma omp parallel num_threads(8)
    {
      char* actual = get_string();
      check(streqls(actual, expected));
      free(actual);
    }
  }
}
void check_undefined() {
  // Unknown long-name fields and unknown short specifiers must both
  // expand to the literal string "undefined".
  const char* fmts[2] = {"%{foobar}", "%X"};
  for (int idx = 0; idx < 2; ++idx) {
    omp_set_affinity_format(fmts[idx]);
#pragma omp parallel num_threads(8)
    {
      char* text = get_string();
      check(streqls(text, "undefined"));
      free(text);
    }
  }
}
/* Run every affinity-format field check.  Nested parallelism is
 * required for the nesting-level / ancestor tests. */
int main(int argc, char** argv) {
  (void)argc;
  (void)argv;
  omp_set_nested(1);
  check_nesting_level();
  check_thread_num();  // was defined above but never invoked
  check_num_threads();
  check_ancestor_tnum();
  check_process_id();
  //check_native_thread_id();
  check_host();
  check_undefined();
  return 0;
}
|
IntegratorHPMCMonoImplicit.h | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#ifndef __HPMC_MONO_IMPLICIT__H__
#define __HPMC_MONO_IMPLICIT__H__
#include "IntegratorHPMCMono.h"
#include "hoomd/Autotuner.h"
#include <random>
#ifdef _OPENMP
#include <omp.h>
#endif
/*! \file IntegratorHPMCMonoImplicit.h
\brief Defines the template class for HPMC with implicit generated depletant solvent
\note This header cannot be compiled by nvcc
*/
#ifdef NVCC
#error This header cannot be compiled by nvcc
#endif
#include <hoomd/extern/pybind/include/pybind11/pybind11.h>
namespace hpmc
{
//! Template class for HPMC update with implicit depletants
/*!
Depletants are generated randomly on the fly according to the semi-grand canonical ensemble.
The penetrable depletants model is simulated.
\ingroup hpmc_integrators
*/
template< class Shape >
class IntegratorHPMCMonoImplicit : public IntegratorHPMCMono<Shape>
    {
    public:
        //! Construct the integrator
        IntegratorHPMCMonoImplicit(std::shared_ptr<SystemDefinition> sysdef,
                                   unsigned int seed);
        //! Destructor
        virtual ~IntegratorHPMCMonoImplicit();

        //! Set the depletant density in the free volume
        void setDepletantDensity(Scalar n_R)
            {
            m_n_R = n_R;
            // the Poisson means depend on the density; recompute lazily on next update()
            m_need_initialize_poisson = true;
            }

        //! Set the type of depletant particle
        void setDepletantType(unsigned int type)
            {
            m_type = type;
            }

        //! Number of depletant-reinsertions
        /*! \param n_trial Depletant reinsertions per overlapping depletant
        */
        void setNTrial(unsigned int n_trial)
            {
            m_n_trial = n_trial;
            }

        //! Return number of depletant re-insertions
        /*! \note duplicate of getNumTrials(); kept for backward compatibility */
        unsigned int getNTrial() const
            {
            return m_n_trial;
            }

        //! Returns the depletant density
        Scalar getDepletantDensity() const
            {
            return m_n_R;
            }

        //! Return the depletant type
        unsigned int getDepletantType() const
            {
            return m_type;
            }

        //! Return the number of re-insertion trials
        unsigned int getNumTrials() const
            {
            return m_n_trial;
            }

        //! Reset statistics counters
        virtual void resetStats()
            {
            IntegratorHPMCMono<Shape>::resetStats();
            ArrayHandle<hpmc_implicit_counters_t> h_counters(m_implicit_count, access_location::host, access_mode::read);
            m_implicit_count_run_start = h_counters.data[0];
            }

        //! Print statistics about the hpmc steps taken
        virtual void printStats()
            {
            IntegratorHPMCMono<Shape>::printStats();
            hpmc_implicit_counters_t result = getImplicitCounters(1);
            double cur_time = double(this->m_clock.getTime()) / Scalar(1e9);
            this->m_exec_conf->msg->notice(2) << "-- Implicit depletants stats:" << "\n";
            this->m_exec_conf->msg->notice(2) << "Depletant insertions per second: "
                                              << double(result.insert_count)/cur_time << "\n";
            this->m_exec_conf->msg->notice(2) << "Configurational bias attempts per second: "
                                              << double(result.reinsert_count)/cur_time << "\n";
            this->m_exec_conf->msg->notice(2) << "Fraction of depletants in free volume: "
                                              << result.getFreeVolumeFraction() << "\n";
            this->m_exec_conf->msg->notice(2) << "Fraction of overlapping depletants: "
                                              << result.getOverlapFraction()<< "\n";
            }

        //! Get the current counter values
        hpmc_implicit_counters_t getImplicitCounters(unsigned int mode=0);

        /* \returns a list of provided quantities
        */
        std::vector< std::string > getProvidedLogQuantities()
            {
            // start with the integrator provided quantities
            std::vector< std::string > result = IntegratorHPMCMono<Shape>::getProvidedLogQuantities();
            // then add ours
            result.push_back("hpmc_fugacity");
            result.push_back("hpmc_ntrial");
            result.push_back("hpmc_insert_count");
            result.push_back("hpmc_reinsert_count");
            result.push_back("hpmc_free_volume_fraction");
            result.push_back("hpmc_overlap_fraction");
            result.push_back("hpmc_configurational_bias_ratio");
            return result;
            }

        //! Get the value of a logged quantity
        virtual Scalar getLogValue(const std::string& quantity, unsigned int timestep);

        //! Method to scale the box
        virtual bool attemptBoxResize(unsigned int timestep, const BoxDim& new_box);

        //! Slot to be called when number of types changes
        void slotNumTypesChange();

    protected:
        Scalar m_n_R;                                        //!< Average depletant number density in free volume
        unsigned int m_type;                                 //!< Type of depletant particle to generate
        GPUArray<hpmc_implicit_counters_t> m_implicit_count; //!< Counter of active cell cluster moves
        hpmc_implicit_counters_t m_implicit_count_run_start; //!< Counter of active cell cluster moves at run start
        hpmc_implicit_counters_t m_implicit_count_step_start; //!< Counter of active cell cluster moves at step start
        std::vector<std::poisson_distribution<unsigned int> > m_poisson; //!< Poisson distribution
        std::vector<Scalar> m_lambda;                        //!< Poisson distribution parameters per type
        Scalar m_d_dep;                                      //!< Depletant circumsphere diameter
        GPUArray<Scalar> m_d_min;                            //!< Minimum sphere from which test depletant is excluded
        GPUArray<Scalar> m_d_max;                            //!< Maximum sphere for test depletant insertion
        std::vector<hoomd::detail::Saru> m_rng_depletant;    //!< RNGs for depletant insertion
        bool m_rng_initialized;                              //!< True if RNGs have been initialized
        unsigned int m_n_trial;                              //!< Number of trial re-insertions per depletant
        bool m_need_initialize_poisson;                      //!< Flag to tell if we need to initialize the poisson distribution

        //! Take one timestep forward
        virtual void update(unsigned int timestep);

        //! Initialize Poisson distribution parameters
        virtual void updatePoissonParameters();

        //! Initialize the Poisson distributions
        virtual void initializePoissonDistribution();

        //! Set the nominal width appropriate for depletion interaction
        virtual void updateCellWidth();

        //! Generate a random depletant position in a sphere around a particle
        template<class RNG>
        inline void generateDepletant(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta, Scalar d_min,
            vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletants);

        /*! Generate a random depletant position in a region including the sphere around a particle,
            restricted so that it does not intersect another sphere
        */
        template<class RNG>
        inline void generateDepletantRestricted(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta, Scalar delta_other,
            vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletants,
            vec3<Scalar> pos_sphere_other);

        //! Try inserting a depletant in a configuration such that it overlaps with the particle in the old (new) configuration
        inline bool insertDepletant(vec3<Scalar>& pos_depletant, const Shape& shape_depletant, unsigned int idx,
            typename Shape::param_type *h_params, unsigned int *h_overlaps, unsigned int typ_i, Scalar4 *h_postype, Scalar4 *h_orientation,
            vec3<Scalar> pos_new, quat<Scalar>& orientation_new, const typename Shape::param_type& params_new,
            unsigned int &overlap_checks, unsigned int &overlap_err_count, bool &overlap_shape, bool new_config);
    };
/*! \param sysdef System definition
\param seed Random number generator seed
\throws std::runtime_error if the system is 2D (only 3D is supported at this time)
*/
template< class Shape >
IntegratorHPMCMonoImplicit< Shape >::IntegratorHPMCMonoImplicit(std::shared_ptr<SystemDefinition> sysdef,
unsigned int seed)
: IntegratorHPMCMono<Shape>(sysdef, seed), m_n_R(0), m_type(0), m_d_dep(0.0), m_rng_initialized(false), m_n_trial(0),
m_need_initialize_poisson(true)
{
this->m_exec_conf->msg->notice(5) << "Constructing IntegratorHPMCImplicit" << std::endl;
// allocate the counters and per-type bound arrays, swapping the fresh
// allocations into the member GPUArrays
GPUArray<hpmc_implicit_counters_t> implicit_count(1,this->m_exec_conf);
m_implicit_count.swap(implicit_count);
GPUArray<Scalar> d_min(this->m_pdata->getNTypes(), this->m_exec_conf);
m_d_min.swap(d_min);
GPUArray<Scalar> d_max(this->m_pdata->getNTypes(), this->m_exec_conf);
m_d_max.swap(d_max);
// FLT_MAX marks "not yet computed"; real values are set in updatePoissonParameters()
m_lambda.resize(this->m_pdata->getNTypes(),FLT_MAX);
if (this->m_sysdef->getNDimensions() == 2)
{
throw std::runtime_error("2D runs not supported for this integrator.");
}
}
//! Destructor
// Intentionally empty: all members clean up via their own destructors.
template< class Shape >
IntegratorHPMCMonoImplicit< Shape >::~IntegratorHPMCMonoImplicit()
{
}
// Reallocate all per-type storage when the number of particle types
// changes, and schedule a recomputation of the Poisson parameters.
template <class Shape>
void IntegratorHPMCMonoImplicit<Shape>::slotNumTypesChange()
{
// call parent class method
IntegratorHPMCMono<Shape>::slotNumTypesChange();
m_lambda.resize(this->m_pdata->getNTypes(),FLT_MAX);
GPUArray<Scalar> d_min(this->m_pdata->getNTypes(), this->m_exec_conf);
m_d_min.swap(d_min);
GPUArray<Scalar> d_max(this->m_pdata->getNTypes(), this->m_exec_conf);
m_d_max.swap(d_max);
m_need_initialize_poisson = true;
}
// Recompute, for every particle type, the depletant insertion-sphere
// bounds (m_d_min/m_d_max) and the Poisson mean lambda = n_R * V,
// where V is the volume of the spherical insertion shell around a
// particle of that type.
template< class Shape >
void IntegratorHPMCMonoImplicit< Shape >::updatePoissonParameters()
{
ArrayHandle<typename Shape::param_type> h_params(this->m_params, access_location::host, access_mode::read);
// Depletant diameter
quat<Scalar> o;
Shape shape_depletant(o, h_params.data[this->m_type]);
m_d_dep = shape_depletant.getCircumsphereDiameter();
// access GPUArrays
ArrayHandle<Scalar> h_d_min(m_d_min, access_location::host, access_mode::overwrite);
ArrayHandle<Scalar> h_d_max(m_d_max, access_location::host, access_mode::overwrite);
for (unsigned int i_type = 0; i_type < this->m_pdata->getNTypes(); ++i_type)
{
// test sphere diameter and volume
Shape shape_i(quat<Scalar>(), h_params.data[i_type]);
Scalar delta = shape_i.getCircumsphereDiameter()+m_d_dep;
h_d_max.data[i_type] = delta;
// volume of insertion sphere
Scalar V = Scalar(M_PI/6.0)*delta*delta*delta;
// Minimum diameter of colloid sphere in which depletant can be inserted without overlapping with other colloids
// Scalar d = std::max(Scalar(2.0)*shape_i.getInsphereRadius()-m_d_dep,0.0);
// NOTE(review): the insphere-based exclusion above is disabled; the
// minimum diameter is currently always zero (no inner exclusion sphere)
Scalar d = Scalar(0.0);
h_d_min.data[i_type] = d;
// subtract inner sphere from sampling volume
V -= Scalar(M_PI/6.0)*d*d*d;
// average number of depletants in volume
m_lambda[i_type] = this->m_n_R*V;
}
}
// Construct one std::poisson_distribution per type from the means
// computed in updatePoissonParameters().
template<class Shape>
void IntegratorHPMCMonoImplicit< Shape >::initializePoissonDistribution()
{
m_poisson.resize(this->m_pdata->getNTypes());
for (unsigned int i_type = 0; i_type < this->m_pdata->getNTypes(); ++i_type)
{
// parameter for Poisson distribution
Scalar lambda = m_lambda[i_type];
if (lambda <= Scalar(0.0))
{
// guard against invalid parameters
continue;
}
m_poisson[i_type] = std::poisson_distribution<unsigned int>(lambda);
}
}
// Widen the nominal interaction width by the depletant diameter when
// depletants are enabled (n_R > 0), so the cell/AABB structures cover
// the full depletion interaction range.
template< class Shape >
void IntegratorHPMCMonoImplicit< Shape >::updateCellWidth()
{
this->m_nominal_width = this->getMaxDiameter();
if (m_n_R > Scalar(0.0))
{
// add range of depletion interaction
ArrayHandle<typename Shape::param_type> h_params(this->m_params, access_location::host, access_mode::read);
quat<Scalar> o;
Shape tmp(o, h_params.data[m_type]);
this->m_nominal_width += tmp.getCircumsphereDiameter();
}
this->m_exec_conf->msg->notice(5) << "IntegratorHPMCMonoImplicit: updating nominal width to " << this->m_nominal_width << std::endl;
}
/*! Perform one Monte Carlo sweep of trial moves with implicit depletants.
    \param timestep Current time step of the simulation

    For each of m_nselect passes over the shuffled local particles, a trial
    translation or rotation is generated and checked for hard overlaps against
    all periodic images via the AABB tree. If overlap-free, a Poisson-distributed
    number of depletants is inserted in a sphere around the particle; a depletant
    that overlaps the new configuration but not the old one vetoes the move
    (or, with m_n_trial > 0, contributes to a configurational-bias acceptance
    probability exp(lnb)). Finally particles are wrapped back into the box and,
    under MPI, a random grid shift is applied.
*/
template< class Shape >
void IntegratorHPMCMonoImplicit< Shape >::update(unsigned int timestep)
{
this->m_exec_conf->msg->notice(10) << "HPMCMonoImplicit update: " << timestep << std::endl;
IntegratorHPMC::update(timestep);
// update poisson distributions
if (m_need_initialize_poisson)
{
updatePoissonParameters();
initializePoissonDistribution();
m_need_initialize_poisson = false;
}
// lazily create one Saru RNG per OpenMP thread so depletant generation is
// reproducible per (timestep, rank, thread) without cross-thread contention
if (!m_rng_initialized)
{
unsigned int n_omp_threads = 1;
#ifdef _OPENMP
n_omp_threads = omp_get_max_threads();
#endif
// initialize a set of random number generators
for (unsigned int i = 0; i < n_omp_threads; ++i)
{
m_rng_depletant.push_back(hoomd::detail::Saru(timestep,this->m_seed+this->m_exec_conf->getRank(), i));
}
m_rng_initialized = true;
}
// get needed vars
ArrayHandle<hpmc_counters_t> h_counters(this->m_count_total, access_location::host, access_mode::readwrite);
hpmc_counters_t& counters = h_counters.data[0];
ArrayHandle<hpmc_implicit_counters_t> h_implicit_counters(m_implicit_count, access_location::host, access_mode::readwrite);
hpmc_implicit_counters_t& implicit_counters = h_implicit_counters.data[0];
// snapshot counters so getImplicitCounters(mode==2) can report per-step deltas
m_implicit_count_step_start = implicit_counters;
const BoxDim& box = this->m_pdata->getBox();
unsigned int ndim = this->m_sysdef->getNDimensions();
#ifdef ENABLE_MPI
// compute the width of the active region
Scalar3 npd = box.getNearestPlaneDistance();
Scalar3 ghost_fraction = this->m_nominal_width / npd;
#endif
// Shuffle the order of particles for this step
this->m_update_order.resize(this->m_pdata->getN());
this->m_update_order.shuffle(timestep);
// update the AABB Tree
this->buildAABBTree();
// limit m_d entries so that particles cannot possibly wander more than one box image in one time step
this->limitMoveDistances();
// update the image list
this->updateImageList();
// combine the three seeds
std::vector<unsigned int> seed_seq(3);
seed_seq[0] = this->m_seed;
seed_seq[1] = timestep;
seed_seq[2] = this->m_exec_conf->getRank();
std::seed_seq seed(seed_seq.begin(), seed_seq.end());
// RNG for poisson distribution
std::mt19937 rng_poisson(seed);
if (this->m_prof) this->m_prof->push(this->m_exec_conf, "HPMC implicit");
// access depletant insertion sphere dimensions
ArrayHandle<Scalar> h_d_min(m_d_min, access_location::host, access_mode::read);
ArrayHandle<Scalar> h_d_max(m_d_max, access_location::host, access_mode::read);
// loop over local particles nselect times
for (unsigned int i_nselect = 0; i_nselect < this->m_nselect; i_nselect++)
{
// access particle data and system box
ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite);
ArrayHandle<Scalar4> h_orientation(this->m_pdata->getOrientationArray(), access_location::host, access_mode::readwrite);
// access parameters and interaction matrix
ArrayHandle<typename Shape::param_type> h_params(this->m_params, access_location::host, access_mode::read);
ArrayHandle<unsigned int> h_overlaps(this->m_overlaps, access_location::host, access_mode::read);
//access move sizes
ArrayHandle<Scalar> h_d(this->m_d, access_location::host, access_mode::read);
ArrayHandle<Scalar> h_a(this->m_a, access_location::host, access_mode::read);
// loop through N particles in a shuffled order
for (unsigned int cur_particle = 0; cur_particle < this->m_pdata->getN(); cur_particle++)
{
unsigned int i = this->m_update_order[cur_particle];
// read in the current position and orientation
Scalar4 postype_i = h_postype.data[i];
Scalar4 orientation_i = h_orientation.data[i];
vec3<Scalar> pos_i = vec3<Scalar>(postype_i);
#ifdef ENABLE_MPI
if (this->m_comm)
{
// only move particle if active
if (!isActive(make_scalar3(postype_i.x, postype_i.y, postype_i.z), box, ghost_fraction))
continue;
}
#endif
// make a trial move for i
hoomd::detail::Saru rng_i(i, this->m_seed + this->m_exec_conf->getRank()*this->m_nselect + i_nselect, timestep);
int typ_i = __scalar_as_int(postype_i.w);
Shape shape_i(quat<Scalar>(orientation_i), h_params.data[typ_i]);
// low 16 bits of a u32 draw select translate vs. rotate against m_move_ratio
unsigned int move_type_select = rng_i.u32() & 0xffff;
bool move_type_translate = !shape_i.hasOrientation() || (move_type_select < this->m_move_ratio);
if (move_type_translate)
{
move_translate(pos_i, rng_i, h_d.data[typ_i], ndim);
#ifdef ENABLE_MPI
if (this->m_comm)
{
// check if particle has moved into the ghost layer, and skip if it is
if (!isActive(vec_to_scalar3(pos_i), box, ghost_fraction))
continue;
}
#endif
}
else
{
move_rotate(shape_i.orientation, rng_i, h_a.data[typ_i], ndim);
}
// check for overlaps with neighboring particle's positions
bool overlap=false;
detail::AABB aabb_i_local = shape_i.getAABB(vec3<Scalar>(0,0,0));
// All image boxes (including the primary)
const unsigned int n_images = this->m_image_list.size();
for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
{
vec3<Scalar> pos_i_image = pos_i + this->m_image_list[cur_image];
detail::AABB aabb = aabb_i_local;
aabb.translate(pos_i_image);
// stackless search
for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
{
if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
{
if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
{
for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
{
// read in its position and orientation
unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);
Scalar4 postype_j;
Scalar4 orientation_j;
// handle j==i situations
if ( j != i )
{
// load the position and orientation of the j particle
postype_j = h_postype.data[j];
orientation_j = h_orientation.data[j];
}
else
{
if (cur_image == 0)
{
// in the first image, skip i == j
continue;
}
else
{
// If this is particle i and we are in an outside image, use the translated position and orientation
postype_j = make_scalar4(pos_i.x, pos_i.y, pos_i.z, postype_i.w);
orientation_j = quat_to_scalar4(shape_i.orientation);
}
}
// put particles in coordinate system of particle i
vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_i_image;
unsigned int typ_j = __scalar_as_int(postype_j.w);
Shape shape_j(quat<Scalar>(orientation_j), h_params.data[typ_j]);
counters.overlap_checks++;
// check circumsphere overlap
OverlapReal rsq = dot(r_ij,r_ij);
OverlapReal DaDb = shape_i.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
if (h_overlaps.data[this->m_overlap_idx(typ_i,typ_j)]
&& circumsphere_overlap
&& test_overlap(r_ij, shape_i, shape_j, counters.overlap_err_count))
{
overlap = true;
break;
}
}
}
}
else
{
// skip ahead
cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
}
if (overlap)
break;
} // end loop over AABB nodes
if (overlap)
break;
} // end loop over images
// whether the move is accepted
bool accept = !overlap;
if (!overlap)
{
// log of acceptance probability
Scalar lnb(0.0);
// set to 1 by any thread to veto the move; reduced with max below
unsigned int zero = 0;
// The trial move is valid. Now generate random depletant particles in a sphere
// of radius (d_max+d_depletant+move size)/2.0 around the original particle position
// draw number from Poisson distribution
unsigned int n = 0;
if (m_lambda[typ_i] > Scalar(0.0))
{
n = m_poisson[typ_i](rng_poisson);
}
unsigned int n_overlap_checks = 0;
unsigned int overlap_err_count = 0;
unsigned int insert_count = 0;
unsigned int reinsert_count = 0;
unsigned int free_volume_count = 0;
unsigned int overlap_count = 0;
// NOTE(review): volatile is used here as a cooperative early-exit flag, not a
// synchronization primitive; a relaxed atomic would be the portable choice — confirm
volatile bool flag=false;
#pragma omp parallel for reduction(+ : lnb, n_overlap_checks, overlap_err_count, insert_count, reinsert_count, free_volume_count, overlap_count) reduction(max: zero) shared(flag) if (n>0) schedule(dynamic)
for (unsigned int k = 0; k < n; ++k)
{
if (flag)
{
#ifndef _OPENMP
break;
#else
continue;
#endif
}
insert_count++;
// generate a random depletant coordinate and orientation in the sphere around the new position
vec3<Scalar> pos_test;
quat<Scalar> orientation_test;
#ifdef _OPENMP
unsigned int thread_idx = omp_get_thread_num();
#else
unsigned int thread_idx = 0;
#endif
generateDepletant(m_rng_depletant[thread_idx], pos_i, h_d_max.data[typ_i], h_d_min.data[typ_i], pos_test,
orientation_test, h_params.data[m_type]);
Shape shape_test(orientation_test, h_params.data[m_type]);
detail::AABB aabb_test_local = shape_test.getAABB(vec3<Scalar>(0,0,0));
bool overlap_depletant = false;
// Check if the new configuration of particle i generates an overlap
for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
{
vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image];
detail::AABB aabb = aabb_test_local;
aabb.translate(pos_test_image);
vec3<Scalar> r_ij = pos_i - pos_test_image;
n_overlap_checks++;
// check circumsphere overlap
OverlapReal rsq = dot(r_ij,r_ij);
OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
if (h_overlaps.data[this->m_overlap_idx(m_type, typ_i)]
&& circumsphere_overlap
&& test_overlap(r_ij, shape_test, shape_i, overlap_err_count))
{
overlap_depletant = true;
overlap_count++;
break;
}
}
if (overlap_depletant)
{
// check against overlap with old position
bool overlap_old = false;
// All image boxes (including the primary)
const unsigned int n_images = this->m_image_list.size();
for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
{
vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image];
detail::AABB aabb = aabb_test_local;
aabb.translate(pos_test_image);
// stackless search
for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
{
if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
{
if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
{
for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
{
// read in its position and orientation
unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);
Scalar4 postype_j;
Scalar4 orientation_j;
// load the old position and orientation of the j particle
postype_j = h_postype.data[j];
orientation_j = h_orientation.data[j];
// put particles in coordinate system of particle i
vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_test_image;
unsigned int typ_j = __scalar_as_int(postype_j.w);
Shape shape_j(quat<Scalar>(orientation_j), h_params.data[typ_j]);
n_overlap_checks++;
// check circumsphere overlap
OverlapReal rsq = dot(r_ij,r_ij);
OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
if (h_overlaps.data[this->m_overlap_idx(m_type,typ_j)]
&& circumsphere_overlap
&& test_overlap(r_ij, shape_test, shape_j, overlap_err_count))
{
// depletant is ignored for any overlap in the old configuration
overlap_old = true;
break;
}
}
}
}
else
{
// skip ahead
cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
}
if (overlap_old)
break;
} // end loop over AABB nodes
if (overlap_old)
break;
} // end loop over images
if (!overlap_old)
{
free_volume_count++;
}
else
{
// the depletant overlap doesn't count since it was already overlapping
// in the old configuration
overlap_depletant = false;
}
}
if (overlap_depletant && !m_n_trial)
{
zero = 1;
// break out of loop
flag = true;
}
else if (overlap_depletant && m_n_trial)
{
const typename Shape::param_type& params_depletant = h_params.data[m_type];
// Number of successful depletant insertions in new configuration
unsigned int n_success_new = 0;
// Number of allowed insertion trials (those which overlap with colloid at old position)
unsigned int n_overlap_shape_new = 0;
// diameter (around origin) in which we are guaranteed to intersect with the shape
Scalar delta_insphere = Scalar(2.0)*shape_i.getInsphereRadius();
// same for old reverse move. Because we have already sampled one successful insertion
// that overlaps with the colloid at the new position, we increment by one (super-detailed
// balance)
unsigned int n_success_old = 1;
unsigned int n_overlap_shape_old = 1;
Scalar4& postype_i_old = h_postype.data[i];
vec3<Scalar> pos_i_old(postype_i_old);
quat<Scalar> orientation_i_old(h_orientation.data[i]);
for (unsigned int l = 0; l < m_n_trial; ++l)
{
// generate a random depletant position and orientation
// in both the old and the new configuration of the colloid particle
vec3<Scalar> pos_depletant_old, pos_depletant_new;
quat<Scalar> orientation_depletant_old, orientation_depletant_new;
// try moving the overlapping depletant in the excluded volume
// such that it overlaps with the particle at the old position
generateDepletantRestricted(m_rng_depletant[thread_idx], pos_i_old, h_d_max.data[typ_i], delta_insphere,
pos_depletant_new, orientation_depletant_new, params_depletant, pos_i);
reinsert_count++;
Shape shape_depletant_new(orientation_depletant_new, params_depletant);
const typename Shape::param_type& params_i = h_params.data[__scalar_as_int(postype_i_old.w)];
bool overlap_shape = false;
if (insertDepletant(pos_depletant_new, shape_depletant_new, i, h_params.data, h_overlaps.data, typ_i,
h_postype.data, h_orientation.data, pos_i, shape_i.orientation, params_i,
n_overlap_checks, overlap_err_count, overlap_shape, false))
{
n_success_new++;
}
if (overlap_shape)
{
// depletant overlaps with colloid at old position
n_overlap_shape_new++;
}
if (l >= 1)
{
// as above, in excluded volume sphere at new position
generateDepletantRestricted(m_rng_depletant[thread_idx], pos_i, h_d_max.data[typ_i], delta_insphere,
pos_depletant_old, orientation_depletant_old, params_depletant, pos_i_old);
Shape shape_depletant_old(orientation_depletant_old, params_depletant);
if (insertDepletant(pos_depletant_old, shape_depletant_old, i, h_params.data, h_overlaps.data, typ_i,
h_postype.data, h_orientation.data, pos_i, shape_i.orientation, params_i,
n_overlap_checks, overlap_err_count, overlap_shape, true))
{
n_success_old++;
}
if (overlap_shape)
{
// depletant overlaps with colloid at new position
n_overlap_shape_old++;
}
reinsert_count++;
}
// NOTE(review): these read the shared `counters` struct inside the parallel
// region without synchronization, and add its *running totals* (not deltas)
// into per-thread accumulators — looks racy/double-counting; confirm intent
n_overlap_checks += counters.overlap_checks;
overlap_err_count += counters.overlap_err_count;
} // end loop over re-insertion attempts
if (n_success_new != 0)
{
lnb += log((Scalar)n_success_new/(Scalar)n_overlap_shape_new);
lnb -= log((Scalar)n_success_old/(Scalar)n_overlap_shape_old);
}
else
{
zero = 1;
// break out of loop
flag = true;
}
} // end if depletant overlap
} // end loop over depletants
// increment counters
counters.overlap_checks += n_overlap_checks;
counters.overlap_err_count += overlap_err_count;
implicit_counters.insert_count += insert_count;
implicit_counters.free_volume_count += free_volume_count;
implicit_counters.overlap_count += overlap_count;
implicit_counters.reinsert_count += reinsert_count;
// apply acceptance criterium
if (!zero)
{
accept = rng_i.f() < exp(lnb);
}
else
{
accept = false;
}
} // end depletant placement
// if the move is accepted
if (accept)
{
// increment accept counter and assign new position
if (!shape_i.ignoreStatistics())
{
if (move_type_translate)
counters.translate_accept_count++;
else
counters.rotate_accept_count++;
}
// update the position of the particle in the tree for future updates
detail::AABB aabb = aabb_i_local;
aabb.translate(pos_i);
this->m_aabb_tree.update(i, aabb);
// update position of particle
h_postype.data[i] = make_scalar4(pos_i.x,pos_i.y,pos_i.z,postype_i.w);
if (shape_i.hasOrientation())
{
h_orientation.data[i] = quat_to_scalar4(shape_i.orientation);
}
}
else
{
if (!shape_i.ignoreStatistics())
{
// increment reject counter
if (move_type_translate)
counters.translate_reject_count++;
else
counters.rotate_reject_count++;
}
}
} // end loop over all particles
} // end loop over nselect
{
ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite);
ArrayHandle<int3> h_image(this->m_pdata->getImages(), access_location::host, access_mode::readwrite);
// wrap particles back into box
for (unsigned int i = 0; i < this->m_pdata->getN(); i++)
{
box.wrap(h_postype.data[i], h_image.data[i]);
}
}
// perform the grid shift
#ifdef ENABLE_MPI
if (this->m_comm)
{
ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite);
ArrayHandle<int3> h_image(this->m_pdata->getImages(), access_location::host, access_mode::readwrite);
// precalculate the grid shift
hoomd::detail::Saru rng(timestep, this->m_seed, 0xf4a3210e);
Scalar3 shift = make_scalar3(0,0,0);
shift.x = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0));
shift.y = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0));
if (this->m_sysdef->getNDimensions() == 3)
{
shift.z = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0));
}
for (unsigned int i = 0; i < this->m_pdata->getN(); i++)
{
// read in the current position and orientation
Scalar4 postype_i = h_postype.data[i];
vec3<Scalar> r_i = vec3<Scalar>(postype_i); // translation from local to global coordinates
r_i += vec3<Scalar>(shift);
h_postype.data[i] = vec_to_scalar4(r_i, postype_i.w);
box.wrap(h_postype.data[i], h_image.data[i]);
}
this->m_pdata->translateOrigin(shift);
}
#endif
if (this->m_prof) this->m_prof->pop(this->m_exec_conf);
// migrate and exchange particles
this->communicate(true);
// all particle have been moved, the aabb tree is now invalid
this->m_aabb_tree_invalid = true;
}
/* \param rng The random number generator
* \param pos_sphere Center of sphere
* \param delta diameter of sphere
* \param d_min Diameter of smaller sphere excluding depletant
* \param pos Position of depletant (return value)
* \param orientation Orientation of depletant (return value)
* \param params_depletant Depletant parameters
*/
template<class Shape>
template<class RNG>
inline void IntegratorHPMCMonoImplicit<Shape>::generateDepletant(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta,
    Scalar d_min, vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletant)
    {
    // Pick a uniformly random direction on the unit sphere: azimuth in [0, 2*pi)
    // and height (cos of polar angle) in [-1, 1].
    Scalar azimuth = rng.template s<Scalar>(Scalar(0.0),Scalar(2.0*M_PI));
    Scalar height = rng.template s<Scalar>(Scalar(-1.0),Scalar(1.0));
    Scalar rho = fast::sqrt(Scalar(1.0)-height*height);
    vec3<Scalar> dir(rho*fast::cos(azimuth), rho*fast::sin(azimuth), height);
    // Sample the radius uniformly in volume within the shell [d_min/2, delta/2]
    // by drawing r^3 uniformly and taking the cube root.
    Scalar u3 = rng.template s<Scalar>(fast::pow(d_min/delta,Scalar(3.0)),Scalar(1.0));
    Scalar radius = Scalar(0.5)*delta*fast::pow(u3,Scalar(1.0/3.0));
    // Depletant position inside the insertion sphere around the colloid.
    pos = pos_sphere + radius*dir;
    // Anisotropic depletants additionally receive a random orientation.
    Shape depletant(quat<Scalar>(), params_depletant);
    if (depletant.hasOrientation())
        {
        orientation = generateRandomOrientation(rng);
        }
    }
/* \param rng The random number generator
* \param pos_sphere Center of sphere
* \param delta diameter of sphere
* \param delta_other diameter of other sphere
* \param pos Position of depletant (return value)
* \param orientation Orientation of depletant (return value)
* \param params_depletant Depletant parameters
* \params pos_sphere_other Center of other sphere
*/
template<class Shape>
template<class RNG>
inline void IntegratorHPMCMonoImplicit<Shape>::generateDepletantRestricted(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta,
Scalar delta_other, vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletant,
vec3<Scalar> pos_sphere_other)
{
// separation vector and distance between the two sphere centers
vec3<Scalar> r_ij = pos_sphere - pos_sphere_other;
Scalar d = fast::sqrt(dot(r_ij,r_ij));
// default sampling domain: the full sphere of diameter delta around pos_sphere
Scalar rmin(0.0);
Scalar rmax = Scalar(0.5)*delta;
Scalar ctheta_min(-1.0);
bool do_rotate = false;
if (d > Scalar(0.0) && delta_other > Scalar(0.0))
{
// draw a random direction in the bounded spherical shell
Scalar ctheta = (delta_other*delta_other+Scalar(4.0)*d*d-delta*delta)/(Scalar(4.0)*delta_other*d);
if (ctheta >= Scalar(-1.0) && ctheta < Scalar(1.0))
{
// true intersection, we can restrict angular sampling
ctheta_min = ctheta;
}
// is there an intersection?
if (Scalar(2.0)*d < delta+delta_other)
{
// sample in shell around smaller sphere
rmin = delta_other/Scalar(2.0);
rmax = d+delta/Scalar(2.0);
do_rotate = true;
}
}
// draw random radial coordinate in a spherical shell (uniform in volume: draw r^3, take cube root)
Scalar r3 = rng.template s<Scalar>(fast::pow(rmin/rmax,Scalar(3.0)),Scalar(1.0));
Scalar r = rmax*fast::pow(r3,Scalar(1.0/3.0));
// random direction in spherical shell (z = cos(theta) restricted to [ctheta_min, 1])
Scalar z = rng.s(ctheta_min,Scalar(1.0));
Scalar phi = Scalar(2.0*M_PI)*rng.template s<Scalar>();
vec3<Scalar> n;
if (do_rotate)
{
// build an orthonormal frame around the center-to-center axis u and
// rotate the sampled direction into it
vec3<Scalar> u(r_ij/d);
// normal vector
vec3<Scalar> v(cross(u,vec3<Scalar>(0,0,1)));
if (dot(v,v) < EPSILON)
{
// u was (anti)parallel to z; pick a different axis for the cross product
v = cross(u,vec3<Scalar>(0,1,0));
}
v *= fast::rsqrt(dot(v,v));
quat<Scalar> q(quat<Scalar>::fromAxisAngle(u,phi));
n = z*u+(fast::sqrt(Scalar(1.0)-z*z))*rotate(q,v);
}
else
{
n = vec3<Scalar>(fast::sqrt(Scalar(1.0)-z*z)*fast::cos(phi),fast::sqrt(Scalar(1.0)-z*z)*fast::sin(phi),z);
}
// test depletant position
pos = r*n;
if (do_rotate)
{
// insert such that it potentially intersects the sphere, but not the other one
pos += pos_sphere_other;
}
else
{
// insert in sphere
pos += pos_sphere;
}
// anisotropic depletants additionally receive a random orientation
Shape shape_depletant(quat<Scalar>(), params_depletant);
if (shape_depletant.hasOrientation())
{
orientation = generateRandomOrientation(rng);
}
}
/*! \param pos_depletant Depletant position
* \param shape_depletant Depletant shape
* \param idx Index of updated particle
* \param h_params Parameter array
* \param h_overlaps Interaction matrix
* \param typ_i type of updated particle
* \param h_orientation Orientation array
* \param pos_new New position of updated particle
* \param orientation_new New orientation of updated particle
* \param params_new New shape parameters of updated particle
* \param counters HPMC overlap counters
*/
template<class Shape>
inline bool IntegratorHPMCMonoImplicit<Shape>::insertDepletant(vec3<Scalar>& pos_depletant,
const Shape& shape_depletant, unsigned int idx, typename Shape::param_type *h_params, unsigned int *h_overlaps,
unsigned int typ_i, Scalar4 *h_postype, Scalar4 *h_orientation, vec3<Scalar> pos_new, quat<Scalar>& orientation_new,
const typename Shape::param_type& params_new, unsigned int &n_overlap_checks,
unsigned int &overlap_err_count, bool& overlap_shape, bool new_config)
{
// overlap_shape reports whether the depletant overlaps the moved particle in the
// configuration selected by new_config; the return value additionally requires
// that the depletant is free of all other overlaps (a "successful" insertion)
overlap_shape=false;
detail::AABB aabb_depletant_local = shape_depletant.getAABB(vec3<Scalar>(0,0,0));
// now check if depletant overlaps with moved particle in the old configuration
Shape shape_i(quat<Scalar>(), params_new);
if (shape_i.hasOrientation())
{
if (! new_config)
{
// load old orientation
Scalar4 orientation_i = h_orientation[idx];
shape_i.orientation = quat<Scalar>(orientation_i);
}
else
{
shape_i.orientation = orientation_new;
}
}
vec3<Scalar> pos_i;
if (!new_config)
{
// load old position
pos_i = vec3<Scalar>(h_postype[idx]);
}
else
{
pos_i = pos_new;
}
// only need to consider the (0,0,0) image
detail::AABB aabb = aabb_depletant_local;
aabb.translate(pos_depletant);
// put particles in coordinate system of depletant
vec3<Scalar> r_ij = pos_i - pos_depletant;
n_overlap_checks++;
// test circumsphere overlap
OverlapReal rsq = dot(r_ij,r_ij);
OverlapReal DaDb = shape_depletant.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
if (h_overlaps[this->m_overlap_idx(typ_i, m_type)]
&& circumsphere_overlap && test_overlap(r_ij, shape_depletant, shape_i, overlap_err_count))
{
overlap_shape = true;
}
// same, but for reverse move: test against the *other* configuration of the
// moved particle (new if we just tested old, and vice versa)
if (shape_i.hasOrientation())
{
if (new_config)
{
// load old orientation
Scalar4 orientation_i = h_orientation[idx];
shape_i.orientation = quat<Scalar>(orientation_i);
}
else
{
shape_i.orientation = orientation_new;
}
}
if (new_config)
{
// load old position
pos_i = vec3<Scalar>(h_postype[idx]);
}
else
{
pos_i = pos_new;
}
// only need to consider the (0,0,0) image
aabb = aabb_depletant_local;
aabb.translate(pos_depletant);
// put particles in coordinate system of depletant
r_ij = pos_i - pos_depletant;
n_overlap_checks++;
// test circumsphere overlap
rsq = dot(r_ij,r_ij);
DaDb = shape_depletant.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
// check for overlaps with neighboring particle's positions
bool overlap=false;
if (h_overlaps[this->m_overlap_idx(m_type, typ_i)]
&& circumsphere_overlap && test_overlap(r_ij, shape_depletant, shape_i, overlap_err_count))
{
// if we are already overlapping in the other configuration, this doesn't count as an insertion
overlap = true;
}
// only search the AABB tree for other neighbors if the depletant actually
// overlaps the moved particle and is not disqualified already
if (!overlap && overlap_shape)
{
// All image boxes (including the primary)
const unsigned int n_images = this->m_image_list.size();
for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
{
vec3<Scalar> pos_depletant_image = pos_depletant + this->m_image_list[cur_image];
detail::AABB aabb = aabb_depletant_local;
aabb.translate(pos_depletant_image);
// stackless search
for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
{
if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
{
if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
{
for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
{
// read in its position and orientation
unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);
// load the position and orientation of the j particle
Scalar4 postype_j = h_postype[j];
vec3<Scalar> pos_j(postype_j);
Scalar4 orientation_j = h_orientation[j];
unsigned int type = __scalar_as_int(postype_j.w);
Shape shape_j(quat<Scalar>(orientation_j), h_params[type]);
if (j == idx)
{
// we have already excluded overlap with the moved particle above
continue;
}
// put particles in coordinate system of depletant
vec3<Scalar> r_ij = pos_j - pos_depletant_image;
n_overlap_checks++;
// check circumsphere overlap
OverlapReal rsq = dot(r_ij,r_ij);
OverlapReal DaDb = shape_depletant.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
if (h_overlaps[this->m_overlap_idx(type, m_type)]
&& circumsphere_overlap
&& test_overlap(r_ij, shape_depletant, shape_j, overlap_err_count))
{
overlap = true;
break;
}
}
}
}
else
{
// skip ahead
cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
}
if (overlap)
break;
} // end loop over AABB nodes
if (overlap)
break;
} // end loop over images
} // end if overlap with shape
// successful insertion: overlaps the moved particle, and nothing else
return overlap_shape && !overlap;
}
/*! \param quantity Name of the log quantity to get
\param timestep Current time step of the simulation
\return the requested log quantity.
*/
template<class Shape>
Scalar IntegratorHPMCMonoImplicit<Shape>::getLogValue(const std::string& quantity, unsigned int timestep)
    {
    // Quantities that are plain parameters — no counter access needed.
    if (quantity == "hpmc_fugacity")
        {
        return (Scalar) m_n_R;
        }
    if (quantity == "hpmc_ntrial")
        {
        return (Scalar) m_n_trial;
        }
    // Counter-derived quantities, taken relative to the last executed step (mode 2).
    hpmc_counters_t counters = IntegratorHPMC::getCounters(2);
    hpmc_implicit_counters_t implicit_counters = getImplicitCounters(2);
    if (quantity == "hpmc_insert_count")
        {
        // average number of depletant insertions per colloid trial move
        return counters.getNMoves() > 0
            ? (Scalar)implicit_counters.insert_count/(Scalar)counters.getNMoves()
            : Scalar(0.0);
        }
    if (quantity == "hpmc_reinsert_count")
        {
        // average number of overlapping depletants reinserted per colloid trial move
        return counters.getNMoves() > 0
            ? (Scalar)implicit_counters.reinsert_count/(Scalar)counters.getNMoves()
            : Scalar(0.0);
        }
    if (quantity == "hpmc_free_volume_fraction")
        {
        // fraction of free volume in the depletant insertion sphere
        return (Scalar) implicit_counters.getFreeVolumeFraction();
        }
    if (quantity == "hpmc_overlap_fraction")
        {
        // fraction of depletants overlapping after a trial move
        return (Scalar) implicit_counters.getOverlapFraction();
        }
    if (quantity == "hpmc_configurational_bias_ratio")
        {
        // acceptance ratio of configurational-bias reinsertions
        return (Scalar) implicit_counters.getConfigurationalBiasRatio();
        }
    // nothing found -> pass on to base class
    return IntegratorHPMCMono<Shape>::getLogValue(quantity, timestep);
    }
/*! \param mode 0 -> Absolute count, 1 -> relative to the start of the run, 2 -> relative to the last executed step
\return The current state of the acceptance counters
IntegratorHPMCMonoImplicit maintains a count of the number of accepted and rejected moves since instantiation. getCounters()
provides the current value. The parameter *mode* controls whether the returned counts are absolute, relative
to the start of the run, or relative to the start of the last executed step.
*/
template<class Shape>
hpmc_implicit_counters_t IntegratorHPMCMonoImplicit<Shape>::getImplicitCounters(unsigned int mode)
    {
    ArrayHandle<hpmc_implicit_counters_t> h_counters(m_implicit_count, access_location::host, access_mode::read);
    // Select the reference point the counts are reported against.
    hpmc_implicit_counters_t result;
    switch (mode)
        {
        case 0:
            // absolute counts since instantiation
            result = h_counters.data[0];
            break;
        case 1:
            // relative to the start of the run
            result = h_counters.data[0] - m_implicit_count_run_start;
            break;
        default:
            // relative to the last executed step
            result = h_counters.data[0] - m_implicit_count_step_start;
            break;
        }
#ifdef ENABLE_MPI
    if (this->m_comm)
        {
        // MPI Reduction to total result values on all ranks
        MPI_Allreduce(MPI_IN_PLACE, &result.insert_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        MPI_Allreduce(MPI_IN_PLACE, &result.free_volume_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        MPI_Allreduce(MPI_IN_PLACE, &result.overlap_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        MPI_Allreduce(MPI_IN_PLACE, &result.reinsert_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        }
#endif
    return result;
    }
/*! NPT simulations are not supported with implicit depletants
(The Nmu_pPT ensemble is unstable)
\returns false if resize results in overlaps
*/
template<class Shape>
bool IntegratorHPMCMonoImplicit<Shape>::attemptBoxResize(unsigned int timestep, const BoxDim& new_box)
{
// Box resizes are unconditionally rejected for implicit depletants:
// report the error and abort the run. Both parameters are intentionally unused.
this->m_exec_conf->msg->error() << "Nmu_pPT simulations are unsupported." << std::endl;
throw std::runtime_error("Error during implicit depletant integration\n");
}
//! Export this hpmc integrator to python
/*! \param name Name of the class in the exported python module
\tparam Shape An instantiation of IntegratorHPMCMono<Shape> will be exported
*/
template < class Shape > void export_IntegratorHPMCMonoImplicit(pybind11::module& m, const std::string& name)
{
// Register the integrator as a Python class deriving from IntegratorHPMCMono<Shape>,
// exposing the constructor (SystemDefinition, seed) and the depletant accessors.
pybind11::class_<IntegratorHPMCMonoImplicit<Shape>, std::shared_ptr< IntegratorHPMCMonoImplicit<Shape> > >(m, name.c_str(), pybind11::base< IntegratorHPMCMono<Shape> >())
.def(pybind11::init< std::shared_ptr<SystemDefinition>, unsigned int >())
.def("setDepletantDensity", &IntegratorHPMCMonoImplicit<Shape>::setDepletantDensity)
.def("setDepletantType", &IntegratorHPMCMonoImplicit<Shape>::setDepletantType)
.def("setNTrial", &IntegratorHPMCMonoImplicit<Shape>::setNTrial)
.def("getNTrial", &IntegratorHPMCMonoImplicit<Shape>::getNTrial)
.def("getImplicitCounters", &IntegratorHPMCMonoImplicit<Shape>::getImplicitCounters)
;
}
//! Export the counters for depletants
inline void export_hpmc_implicit_counters(pybind11::module& m)
{
// Expose the raw depletant counters and their derived-ratio accessors to Python.
pybind11::class_< hpmc_implicit_counters_t >(m, "hpmc_implicit_counters_t")
.def_readwrite("insert_count", &hpmc_implicit_counters_t::insert_count)
.def_readwrite("reinsert_count", &hpmc_implicit_counters_t::reinsert_count)
.def_readwrite("free_volume_count", &hpmc_implicit_counters_t::free_volume_count)
.def_readwrite("overlap_count", &hpmc_implicit_counters_t::overlap_count)
.def("getFreeVolumeFraction", &hpmc_implicit_counters_t::getFreeVolumeFraction)
.def("getOverlapFraction", &hpmc_implicit_counters_t::getOverlapFraction)
.def("getConfigurationalBiasRatio", &hpmc_implicit_counters_t::getConfigurationalBiasRatio)
;
}
} // end namespace hpmc
#endif // __HPMC_MONO_IMPLICIT__H__
|
mkldnn_requantize-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* \file mkldnn_requantize-inl.h
* \brief
* \author Jin Huang, Xinyu Chen
*/
#ifndef MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_REQUANTIZE_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_REQUANTIZE_INL_H_
#if MXNET_USE_MKLDNN == 1
#include <string>
#include <algorithm>
#include <vector>
#include "../requantize-inl.h"
#include "../../nn/mkldnn/mkldnn_base-inl.h"
namespace mxnet {
namespace op {
template <typename DstType>
static void MKLDNNRequantizeForwardKer(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs,
const float real_range) {
// Requantize an int32 tensor (inputs[0], with min/max in inputs[1]/inputs[2])
// to DstType (int8_t or uint8_t) using a single MKLDNN reorder primitive with
// an output scale. outputs[1]/outputs[2] receive the new min/max calib range.
using namespace mshadow;
using namespace mxnet_op;
using red::limits::MaxValue;
using red::limits::MinValue;
typedef int32_t SrcDType;
// check shapes
size_t i_dim = inputs[0].shape().ndim();
size_t o_dim = outputs[0].shape().ndim();
CHECK_EQ(i_dim, o_dim);
// scale of the int32 input: real range per quantized unit
float first_quantized_range = MinAbs(MinValue<SrcDType>(),
MaxValue<SrcDType>());
float first_real_range = MaxAbs(*inputs[1].data().dptr<float>(),
*inputs[2].data().dptr<float>());
float first_scale = first_real_range / first_quantized_range;
float second_real_range = real_range;
float second_quantized_range = 0.f;
// output quantized range and calibration min/max depend on signedness of DstType
if (std::is_same<DstType, int8_t>::value) {
second_quantized_range = MinAbs(MaxValue<DstType>(), MinValue<DstType>());
*outputs[1].data().dptr<float>() = -second_real_range;
*outputs[2].data().dptr<float>() = second_real_range;
} else if (std::is_same<DstType, uint8_t>::value) {
second_quantized_range = MaxValue<DstType>();
*outputs[1].data().dptr<float>() = 0.f;
*outputs[2].data().dptr<float>() = second_real_range;
} else {
LOG(FATAL) << "Unsupported requantize output type";
}
float second_scale = second_quantized_range / second_real_range;
// combined int32 -> DstType scale applied by the reorder primitive
float scale = first_scale * second_scale;
mkldnn::primitive_attr attr;
const int mask = 0;  // 0 = one common scale for the whole tensor
std::vector<float> scales = {scale};
attr.set_output_scales(mask, scales);
mkldnn::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();
// reorder views to the default layout so the memory descriptor matches the data
NDArray in_buffer = inputs[0];
if (inputs[0].IsView() && inputs[0].IsMKLDNNData())
in_buffer = inputs[0].Reorder2Default();
auto i_mem = in_buffer.GetMKLDNNData();
auto i_desc = i_mem->get_desc();
// output descriptor: same layout as input, only the data type changes
auto o_desc = i_desc;
o_desc.data.data_type = get_mkldnn_type_t<DstType>();
auto reorder_pd = mkldnn::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc, attr);
auto o_mem = CreateMKLDNNMem(outputs[0], o_desc, req[0]);
MKLDNNStream::Get()->RegisterPrimArgs(
mkldnn::reorder(reorder_pd), {{MKLDNN_ARG_FROM, *i_mem}, {MKLDNN_ARG_TO, *o_mem.second}});
CommitOutput(outputs[0], o_mem);
MKLDNNStream::Get()->Submit();
}
/* Forward entry point for the MKLDNN requantize operator.
 * Determines the target real range either from calibration attributes
 * (min/max_calib_range) or, when uncalibrated, from the actual min/max
 * of the int32 input, then dispatches to the typed kernel according to
 * the requested output type (int8 or uint8). */
static void MKLDNNRequantizeForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
using red::limits::MaxValue;
using red::limits::MinValue;
typedef int32_t SrcDType;
typedef int8_t DstDType;  // NOTE(review): unused; dispatch below picks the type
const RequantizeParam& param = nnvm::get<RequantizeParam>(attrs.parsed);
float real_range;
// Model is calibrated
if (param.min_calib_range.has_value() && param.max_calib_range.has_value()) {
real_range =
MaxAbs(param.min_calib_range.value(), param.max_calib_range.value());
// Model is not calibrated
} else {
NDArray in_buffer = inputs[0].Reorder2Default();
auto in_ptr = in_buffer.data().dptr<SrcDType>();
auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
// Per-thread min/max accumulators, reduced sequentially afterwards.
SrcDType data_min = MaxValue<SrcDType>();
SrcDType data_max = MinValue<SrcDType>();
std::vector<SrcDType> data_maxs(nthreads, data_max);
std::vector<SrcDType> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
int tid = omp_get_thread_num();
if (in_ptr[i] > data_maxs[tid]) data_maxs[tid] = in_ptr[i];
if (in_ptr[i] < data_mins[tid]) data_mins[tid] = in_ptr[i];
}
for (index_t i = 0; i < nthreads; i++) {
if (data_maxs[i] > data_max) data_max = data_maxs[i];
if (data_mins[i] < data_min) data_min = data_mins[i];
}
// Convert the observed quantized extremum back to real units.
float src_range = MinAbs(MinValue<SrcDType>(), MaxValue<SrcDType>());
SrcDType data_range = MaxAbs(data_min, data_max);
float data_scale = MaxAbs(*inputs[1].data().dptr<float>(), *inputs[2].data().dptr<float>());
real_range = data_range * data_scale / src_range;
}
auto out_type = GetQuantizeOutputType(param);
if (out_type == mshadow::kUint8) {
MKLDNNRequantizeForwardKer<uint8_t>(attrs, ctx, inputs, req, outputs, real_range);
} else if (out_type == mshadow::kInt8) {
MKLDNNRequantizeForwardKer<int8_t>(attrs, ctx, inputs, req, outputs, real_range);
} else {
LOG(FATAL) << "mkldnn requantize op only supports int8 and uint8 as output type";
}
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_MKLDNN == 1
#endif // MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_REQUANTIZE_INL_H_
|
count_shared_kmer.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include <omp.h>
int K = 24; // kmer's k. K <= 32
//#define FASTQ
#define BUFSIZE 400000
int s_chunk=8192;
char mat[128];
unsigned long long to_code[128];
/* Strips a trailing newline from s in place. If the line does not end
 * with '\n' (line longer than BUFSIZE, or a final line without a
 * newline) a warning is printed to stderr instead. Empty strings are
 * left untouched: the original indexed s[len-1] == s[-1] when len == 0,
 * which is undefined behaviour. */
void chomp(char * s){
size_t len = strlen(s);
if(len == 0){
return;
}
if(s[len-1] == '\n'){
s[len-1] = '\0';
}else{
fprintf(stderr, "strange str %s\nnot end with newline (too long line? > %d)", s,BUFSIZE);
}
}
/* Replaces str in place with its reverse complement: reverse the
 * characters, then map each base through the global complement table
 * `mat`. Assumes str contains only characters with an entry in `mat`
 * (ACGTNacgtn, as initialised in main). */
void reversecomplement(char * str){
int n = strlen(str);
int lo, hi;
/* two-pointer in-place reversal */
for(lo = 0, hi = n - 1; lo < hi; ++lo, --hi){
char t = str[lo];
str[lo] = str[hi];
str[hi] = t;
}
/* complement every base */
for(lo = 0; lo < n; ++lo){
str[lo] = mat[(int)str[lo]];
}
}
unsigned long long ntuple_code(const char * str, int stt, int n){
if(n>32){
fprintf(stderr, "nruplw_code: cannot handle %d(>32)-mer\n",n);
exit(1);
}
unsigned long long ret=0;
int i;
for(i=0; i<n; ++i){
ret |= (to_code[(int)str[stt+i]] << (2*(n-i-1)));
}
return ret;
}
/* Current wall-clock time as seconds since the epoch, with
 * microsecond resolution, returned as a double. */
double gettimeofday_sec(){
struct timeval now;
gettimeofday(&now, NULL);
return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
void read2kmerpop(char * read, unsigned long long * bucket);
void p_num_of_sharing_kmers(char **, char **, unsigned long long **, unsigned long long **, int*, int);
int compare_ull(const void *a, const void *b);
int opt_fastq=0;
int opt_type=0;
/* Entry point. Parses options (-k kmer size, -q FASTQ input, -t count
 * distinct shared kmer types instead of occurrences), then streams
 * reads from the input file in chunks of s_chunk and reports, for each
 * read, the number of kmers it shares with its own reverse complement. */
int main(int argc, char * argv[]){
/* Complement lookup table used by reversecomplement(). */
mat[(int)'A'] = 'T';
mat[(int)'a'] = 'T';
mat[(int)'C'] = 'G';
mat[(int)'c'] = 'G';
mat[(int)'G'] = 'C';
mat[(int)'g'] = 'C';
mat[(int)'T'] = 'A';
mat[(int)'t'] = 'A';
mat[(int)'N'] = 'N';
mat[(int)'n'] = 'N';
/* 2-bit base encoding used by ntuple_code()/read2kmerpop(). */
to_code[(int)'A'] = 0ull;
to_code[(int)'a'] = 0ull;
to_code[(int)'C'] = 1ull;
to_code[(int)'c'] = 1ull;
to_code[(int)'G'] = 2ull;
to_code[(int)'g'] = 2ull;
to_code[(int)'T'] = 3ull;
to_code[(int)'t'] = 3ull;
to_code[(int)'N'] = 0ull;// XXX N is coded like A; shared-kmer counts may be inflated for N-rich reads
to_code[(int)'n'] = 0ull;
int hitnum=0;  /* number of argv slots consumed by options */
{
int result;
while((result=getopt(argc,argv,"k:qt")) != -1){
switch(result){
case 'k':
K=atoi(optarg);
if(K > 32 || K < 1){
fprintf(stderr, "K must be 1 <= K <= 32.\n");
return 1;
}
hitnum+=2;
break;
case 'q':
opt_fastq=1;
++hitnum;
break;
case 't':
opt_type=1;
++hitnum;
break;
case '?':
printf("humei\n");
break;
default:
break;
}
}
}
if(argc != 2+hitnum){
fprintf(stderr, "USAGE: <this> <in.fa>\n");
fprintf(stderr, "\t-k=i: kmer's k (<=32)\n");
fprintf(stderr, "\t-t: count the number of types of shared kmers, not freq\n");
return 1;
}
char * in_fa = argv[1+hitnum];
FILE * fp = fopen(in_fa,"r");
if(fp == NULL){
fprintf(stderr, "cannot open %s\n", in_fa);
exit(1);
}
/* reads[i]: sequence line of the i-th read in the current chunk. */
char **reads = (char**)malloc(sizeof(char*)*s_chunk);
if(reads == NULL){
fprintf(stderr,"cannot allocate memory: reads\n");
exit(1);
}
{
int i;
for(i=0; i<s_chunk; ++i){
reads[i] = (char*)malloc(sizeof(char)*BUFSIZE);
if(reads[i] == NULL){
fprintf(stderr,"cannot allocate memory: reads[%d]\n",i);
exit(1);
}
}
}
char **nls = (char**)malloc(sizeof(char*)*s_chunk);//namelines
if(nls == NULL){
fprintf(stderr,"cannot allocate memory: nls\n");
exit(1);
}
{
int i;
for(i=0; i<s_chunk; ++i){
nls[i] = (char*)malloc(sizeof(char)*BUFSIZE);
if(nls[i] == NULL){
fprintf(stderr,"cannot allocate memory: nls[%d]\n",i);
exit(1);
}
}
}
/* Scratch line used to discard FASTQ '+' and quality lines. */
char *dum = (char*)malloc(sizeof(char)*BUFSIZE);
if(dum == NULL){
fprintf(stderr,"cannot allocate memory: dum\n");
exit(1);
}
/* buckets[i]: kmer codes of read i; b2[i]: kmer codes of its reverse complement. */
unsigned long long ** buckets = (unsigned long long**)malloc(sizeof(unsigned long long*)*s_chunk);
if(buckets == NULL){
fprintf(stderr, "cannot allocate memory: buckets\n");
exit(1);
}
{
int i;
for(i=0; i<s_chunk; ++i){
buckets[i] = (unsigned long long*)malloc(sizeof(unsigned long long)*BUFSIZE);
if(buckets[i] == NULL){
fprintf(stderr,"cannot allocate memory: buckets[%d]\n",i);
exit(1);
}
}
}
unsigned long long ** b2 = (unsigned long long**)malloc(sizeof(unsigned long long*)*s_chunk);
if(b2 == NULL){
fprintf(stderr, "cannot allocate memory: b2\n");
exit(1);
}
{
int i;
for(i=0; i<s_chunk; ++i){
b2[i] = (unsigned long long*)malloc(sizeof(unsigned long long)*BUFSIZE);
if(b2[i] == NULL){
fprintf(stderr,"cannot allocate memory: b2[%d]\n",i);
exit(1);
}
}
}
/* n_share[i]: number of shared kmers reported for read i. */
int * n_share = (int*)malloc(sizeof(int)*s_chunk);
if(n_share == NULL){
fprintf(stderr,"cannot allocate memory: n_share\n");
exit(1);
}
int n_read=0;
/* Read name line + sequence line per record; reads shorter than K are
 * skipped. Every s_chunk accepted reads the chunk is processed. */
while(fgets(nls[n_read],BUFSIZE,fp)!=NULL){
chomp(nls[n_read]);
fgets(reads[n_read],BUFSIZE,fp);  // NOTE(review): return value unchecked; truncated file reuses previous buffer contents
chomp(reads[n_read]);
if(opt_fastq){
fgets(dum,BUFSIZE,fp);// opt
fgets(dum,BUFSIZE,fp);// qvs
}
if(strlen(reads[n_read]) < K){
continue;
}
else{
++n_read;
}
if(n_read<s_chunk){
continue;
}
p_num_of_sharing_kmers(reads,nls,buckets,b2,n_share,n_read);
n_read=0;
}
/* Flush the final, possibly partial, chunk. */
p_num_of_sharing_kmers(reads,nls,buckets,b2,n_share,n_read);
fclose(fp);
{
int i;
for(i=0; i<s_chunk; ++i){
free(reads[i]);
}
}
free(reads);
{
int i;
for(i=0; i<s_chunk; ++i){
free(nls[i]);
}
}
free(nls);
free(dum);
{
int i;
for(i=0; i<s_chunk; ++i){
free(buckets[i]);
}
}
free(buckets);
{
int i;
for(i=0; i<s_chunk; ++i){
free(b2[i]);
}
}
free(b2);
free(n_share);
return 0;
}
void read2kmerpop(char * read, unsigned long long * bucket){
unsigned long long kalph = ntuple_code(read,0,K);
bucket[0] = kalph;
unsigned long long mask=1ull;
if(K<32){
mask<<=2*K;
mask -= 1ull;
}
else{
mask = 0xffffffffffffffffull;
}
int imax = strlen(read)-K;
int i;
for(i=1; i<= imax; ++i){
kalph <<= 2;
kalph &= mask;
kalph |= to_code[(int)read[K-1+i]];
bucket[i] = kalph;
}
/*
for(i=0; i<count; ++i){
if(bucket[i] > 0){
fprintf(stdout, "%d\t%d\n", i, bucket[i]);
}
}
fprintf(stdout, "%d\t%d\n", INT_MAX, INT_MAX);// as a separator
*/
return;
}
/* For each of the n_reads reads: builds the sorted kmer code lists of
 * the read (buckets[i]) and of its reverse complement (b2[i]), then
 * counts with a sorted-merge how many kmers the two lists share —
 * all occurrences by default, distinct kmer types when opt_type is set
 * — and prints "<count>\t<name>" to stdout (skipping the '>'/'@'
 * prefix of the name line).
 * NOTE(review): reads[i] is reverse-complemented in place, and output
 * line order depends on OpenMP scheduling of the parallel loop. */
void p_num_of_sharing_kmers(char ** reads, char ** nls, unsigned long long ** buckets, unsigned long long ** b2, int * n_share, int n_reads){
int i;
#pragma omp parallel for
for(i=0; i<n_reads; ++i){
// printf("%s\n",read);
// double stt,end;
// stt=gettimeofday_sec();
read2kmerpop(reads[i], buckets[i]);
// end=gettimeofday_sec();
// fprintf(stderr,"read2kmerpop_1: %f\n",end-stt);
reversecomplement(reads[i]);
// printf("%s\n",read);
// stt=gettimeofday_sec();
read2kmerpop(reads[i], b2[i]);
// end=gettimeofday_sec();
// fprintf(stderr,"read2kmerpop_2: %f\n",end-stt);
// stt=gettimeofday_sec();
int j,k;
/* number of kmers in a read of this length */
int loop = strlen(reads[i])-K+1;
n_share[i]=0;
qsort(buckets[i], loop, sizeof(unsigned long long), compare_ull);
qsort(b2[i], loop, sizeof(unsigned long long), compare_ull);
/*
for(j=0; j<loop;++j){
printf("%llu,",buckets[i][j]);
}
printf("\n");
for(j=0; j<loop;++j){
printf("%llu,",b2[i][j]);
}
printf("\n");
*/
/* sorted-merge intersection of the two kmer lists */
for(j=0,k=0; j<loop && k<loop;){
if(buckets[i][j] < b2[i][k]){
++j;
}
else if(buckets[i][j] > b2[i][k]){
++k;
}
else{
++n_share[i];
++j;
++k;
if(opt_type){
/* -t: skip duplicates so each kmer type counts once */
while(j<loop && buckets[i][j] == buckets[i][j-1]){
++j;
}
while(k<loop && b2[i][k] == b2[i][k-1]){
++k;
}
}
}
}
printf("%d\t%s\n",n_share[i],&nls[i][1]);
// end=gettimeofday_sec();
// fprintf(stderr,"kmercount: %f\n",end-stt);
}
}
/* qsort comparator for unsigned long long values, ascending order.
 * Returns -1, 0, or 1. */
int compare_ull(const void *a, const void *b){
const unsigned long long x = *(const unsigned long long*)a;
const unsigned long long y = *(const unsigned long long*)b;
/* branch-free three-way compare */
return (x > y) - (x < y);
}
|
affinity_display.1.c | // RUN: %libomp-compile
// RUN: env OMP_DISPLAY_AFFINITY=TRUE OMP_NUM_THREADS=4 OMP_PLACES='{0,1},{2,3},{4,5},{6,7}' %libomp-run | %python %S/check.py -c 'CHECK' %s
// Affinity Display examples
#include <stdio.h>
#include <stdlib.h> // also null is in <stddef.h>
#include <stddef.h>
#include <omp.h>
#include <string.h>
// ENVIRONMENT
// OMP_DISPLAY_AFFINITY=TRUE
// OMP_NUM_THREADS=4
// OMP_PLACES='{0,1},{2,3},{4,5},{6,7}'
// CHECK: num_threads=1 OMP: pid [0-9]+ tid [0-9]+ thread [0-4] bound to OS proc set \{([0-7])|(0,1)|(undefined)\}
// CHECK: num_threads=4 Thread id [0-3] reporting in
// CHECK: num_threads=4 OMP: pid [0-9]+ tid [0-9]+ thread [0-4] bound to OS proc set \{([0-7])|([0246],[1357])|(undefined)\}
// CHECK: num_threads=1 Default Affinity Format is:
// CHECK: num_threads=1 Affinity Format set to: host=%20H tid=%0.4n binds_to=%A
// CHECK: num_threads=4 tid=[0-3] affinity:host=[a-zA-Z0-9_.-]+[ ]+tid=000[0-4][ ]+binds_to=(([0-7])|([0246],[1357])|(undefined))
#define FORMAT_STORE 80
#define BUFFER_STORE 80
/* Affinity Display example / lit test: exercises omp_display_affinity,
 * omp_get/set_affinity_format and omp_capture_affinity; output is
 * validated against the CHECK lines above by check.py. */
int main(int argc, char** argv) {
int i, n, tid, max_req_store = 0;
size_t nchars;
char default_format[FORMAT_STORE];
char my_format[] = "host=%20H tid=%0.4n binds_to=%A";
char **buffer;
// CODE SEGMENT 1 AFFINITY DISPLAY
omp_display_affinity(NULL);
// OMP_DISPLAY_AFFINITY=TRUE,
// Affinity reported for 1 parallel region
#pragma omp parallel
{
printf("Thread id %d reporting in.\n", omp_get_thread_num());
}
// Get and Display Default Affinity Format
nchars = omp_get_affinity_format(default_format, (size_t)FORMAT_STORE);
printf("Default Affinity Format is: %s\n", default_format);
// nchars is the untruncated length the format would need.
if (nchars > FORMAT_STORE) {
printf("Caution: Reported Format is truncated. Increase\n");
printf(" FORMAT_STORE by %d.\n", (int)nchars - FORMAT_STORE);
}
// Set Affinity Format
omp_set_affinity_format(my_format);
printf("Affinity Format set to: %s\n", my_format);
// CODE SEGMENT 3 CAPTURE AFFINITY
// Set up buffer for affinity of n threads
n = omp_get_max_threads();
buffer = (char **)malloc(sizeof(char *) * n);
for (i = 0; i < n; i++) {
buffer[i] = (char *)malloc(sizeof(char) * BUFFER_STORE);
}
// Capture Affinity using Affinity Format set above.
// Use critical reduction to check size of buffer areas
#pragma omp parallel private(tid, nchars)
{
tid = omp_get_thread_num();
nchars = omp_capture_affinity(buffer[tid], (size_t)BUFFER_STORE, NULL);
#pragma omp critical
{
// NOTE(review): size_t nchars vs int max_req_store comparison —
// benign here since both stay small, but worth confirming.
if (nchars > max_req_store)
max_req_store = nchars;
}
}
for (i = 0; i < n; i++) {
printf("tid=%d affinity:%s:\n", i, buffer[i]);
}
// for 4 threads with OMP_PLACES='{0,1},{2,3},{4,5},{6,7}'
// host=%20H tid=%0.4n binds_to=%A
// host=<hostname> tid=0000 binds_to=0,1
// host=<hostname> tid=0001 binds_to=2,3
// host=<hostname> tid=0002 binds_to=4,5
// host=<hostname> tid=0003 binds_to=6,7
if (max_req_store > BUFFER_STORE) {
printf("Caution: Affinity string truncated. Increase\n");
printf(" BUFFER_STORE by %d\n", max_req_store - BUFFER_STORE);
}
// buffer[] intentionally not freed: process exits immediately (test program).
return 0;
}
|
mur_abc.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "mur_abc.kernel_inc.h"
/* One-time initialisation hook for the setfix_xyz kernel (no-op). */
int openmp_yee_setfix_xyz_init (openmp_pscmc_env * pe ,openmp_yee_setfix_xyz_struct * kerstr ){
return 0;
}
/* Reports the size of the kernel argument struct through *len. */
void openmp_yee_setfix_xyz_get_struct_len (size_t * len ){
*len = sizeof(openmp_yee_setfix_xyz_struct);
}
/* Compute units available to this backend = max OpenMP threads. */
int openmp_yee_setfix_xyz_get_num_compute_units (openmp_yee_setfix_xyz_struct * kerstr ){
return omp_get_max_threads();
}
/* Preferred x-dimension launch length for this backend. */
int openmp_yee_setfix_xyz_get_xlen (){
return IDX_OPT_MAX;
}
/* Runs the setfix_xyz kernel over a 2-D logical iteration space:
 * rows (yid) are distributed round-robin over the OpenMP threads and
 * the kernel is invoked scmc_internal_g_xlen times per row.
 * Removed the unused ysingle/ymin/ymax locals of the generated code.
 * NOTE(review): xid is not forwarded to the kernel, so every inner
 * iteration passes identical arguments — confirm against the generator.
 * Always returns 0. */
int openmp_yee_setfix_xyz_exec (openmp_yee_setfix_xyz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads();
int tid = omp_get_thread_num();
int yid;
int xid;
for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
openmp_yee_setfix_xyz_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->cur_rankx , ( kerstr )->cur_ranky , ( kerstr )->cur_rankz , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->damp_vars)[0] , ( ( kerstr )->deltat)[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0;
}
int openmp_yee_setfix_xyz_scmc_set_parameter_outEB (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->outEB = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_inEB (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inEB = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_cur_rankx (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_rankx = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_cur_ranky (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_ranky = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_cur_rankz (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_rankz = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_y_cpu_core (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->y_cpu_core = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_numvec (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_XLEN (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_YLEN (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_ZLEN (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_ovlp (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_xblock (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xblock = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_yblock (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->yblock = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_zblock (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->zblock = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_num_ele (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_damp_vars (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->damp_vars = pm->d_data);
}
int openmp_yee_setfix_xyz_scmc_set_parameter_deltat (openmp_yee_setfix_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->deltat = pm->d_data);
}
/* One-time initialisation hook for the damp_xyz kernel (no-op). */
int openmp_yee_damp_xyz_init (openmp_pscmc_env * pe ,openmp_yee_damp_xyz_struct * kerstr ){
return 0;
}
/* Reports the size of the kernel argument struct through *len. */
void openmp_yee_damp_xyz_get_struct_len (size_t * len ){
*len = sizeof(openmp_yee_damp_xyz_struct);
}
/* Compute units available to this backend = max OpenMP threads. */
int openmp_yee_damp_xyz_get_num_compute_units (openmp_yee_damp_xyz_struct * kerstr ){
return omp_get_max_threads();
}
/* Preferred x-dimension launch length for this backend. */
int openmp_yee_damp_xyz_get_xlen (){
return IDX_OPT_MAX;
}
/* Runs the damp_xyz kernel over a 2-D logical iteration space:
 * rows (yid) are distributed round-robin over the OpenMP threads and
 * the kernel is invoked scmc_internal_g_xlen times per row.
 * Removed the unused ysingle/ymin/ymax locals of the generated code.
 * NOTE(review): xid is not forwarded to the kernel, so every inner
 * iteration passes identical arguments — confirm against the generator.
 * Always returns 0. */
int openmp_yee_damp_xyz_exec (openmp_yee_damp_xyz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads();
int tid = omp_get_thread_num();
int yid;
int xid;
for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
openmp_yee_damp_xyz_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->cur_rankx , ( kerstr )->cur_ranky , ( kerstr )->cur_rankz , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->damp_vars)[0] , ( ( kerstr )->deltat)[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0;
}
/* Parameter setters for the damp_xyz kernel: each binds one field of
 * the kernel argument struct to the device buffer (d_data) of the given
 * memory object. The generated originals reached the end of a non-void
 * function without returning (undefined behaviour if the caller uses
 * the result); each setter now returns 0. */
int openmp_yee_damp_xyz_scmc_set_parameter_outEB (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->outEB = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_inEB (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->inEB = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_cur_rankx (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_rankx = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_cur_ranky (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_ranky = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_cur_rankz (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_rankz = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_y_cpu_core (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->y_cpu_core = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_numvec (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->numvec = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_XLEN (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->XLEN = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_YLEN (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->YLEN = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_ZLEN (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ZLEN = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_ovlp (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ovlp = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_xblock (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->xblock = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_yblock (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->yblock = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_zblock (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->zblock = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_num_ele (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->num_ele = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_damp_vars (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->damp_vars = pm->d_data;
return 0;
}
int openmp_yee_damp_xyz_scmc_set_parameter_deltat (openmp_yee_damp_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->deltat = pm->d_data;
return 0;
}
/* One-time initialisation hook for the pec_xyz kernel (no-op). */
int openmp_yee_pec_xyz_init (openmp_pscmc_env * pe ,openmp_yee_pec_xyz_struct * kerstr ){
return 0;
}
/* Reports the size of the kernel argument struct through *len. */
void openmp_yee_pec_xyz_get_struct_len (size_t * len ){
*len = sizeof(openmp_yee_pec_xyz_struct);
}
/* Compute units available to this backend = max OpenMP threads. */
int openmp_yee_pec_xyz_get_num_compute_units (openmp_yee_pec_xyz_struct * kerstr ){
return omp_get_max_threads();
}
/* Preferred x-dimension launch length for this backend. */
int openmp_yee_pec_xyz_get_xlen (){
return IDX_OPT_MAX;
}
/* Runs the pec_xyz kernel over a 2-D logical iteration space:
 * rows (yid) are distributed round-robin over the OpenMP threads and
 * the kernel is invoked scmc_internal_g_xlen times per row.
 * Removed the unused ysingle/ymin/ymax locals of the generated code.
 * NOTE(review): xid is not forwarded to the kernel, so every inner
 * iteration passes identical arguments — confirm against the generator.
 * Always returns 0. */
int openmp_yee_pec_xyz_exec (openmp_yee_pec_xyz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads();
int tid = omp_get_thread_num();
int yid;
int xid;
for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
openmp_yee_pec_xyz_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->cur_rankx , ( kerstr )->cur_ranky , ( kerstr )->cur_rankz , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->damp_vars)[0] , ( ( kerstr )->deltat)[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0;
}
/* Parameter setters for the pec_xyz kernel: each binds one field of
 * the kernel argument struct to the device buffer (d_data) of the given
 * memory object. The generated originals reached the end of a non-void
 * function without returning (undefined behaviour if the caller uses
 * the result); each setter now returns 0. */
int openmp_yee_pec_xyz_scmc_set_parameter_outEB (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->outEB = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_inEB (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->inEB = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_cur_rankx (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_rankx = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_cur_ranky (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_ranky = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_cur_rankz (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_rankz = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_y_cpu_core (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->y_cpu_core = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_numvec (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->numvec = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_XLEN (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->XLEN = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_YLEN (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->YLEN = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_ZLEN (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ZLEN = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_ovlp (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ovlp = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_xblock (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->xblock = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_yblock (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->yblock = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_zblock (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->zblock = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_num_ele (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->num_ele = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_damp_vars (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->damp_vars = pm->d_data;
return 0;
}
int openmp_yee_pec_xyz_scmc_set_parameter_deltat (openmp_yee_pec_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->deltat = pm->d_data;
return 0;
}
/* One-time initialisation hook for the abc_xyz kernel (no-op). */
int openmp_yee_abc_xyz_init (openmp_pscmc_env * pe ,openmp_yee_abc_xyz_struct * kerstr ){
return 0;
}
/* Reports the size of the kernel argument struct through *len. */
void openmp_yee_abc_xyz_get_struct_len (size_t * len ){
*len = sizeof(openmp_yee_abc_xyz_struct);
}
/* Compute units available to this backend = max OpenMP threads. */
int openmp_yee_abc_xyz_get_num_compute_units (openmp_yee_abc_xyz_struct * kerstr ){
return omp_get_max_threads();
}
/* Preferred x-dimension launch length for this backend. */
int openmp_yee_abc_xyz_get_xlen (){
return IDX_OPT_MAX;
}
/* Runs the abc_xyz kernel over a 2-D logical iteration space:
 * rows (yid) are distributed round-robin over the OpenMP threads and
 * the kernel is invoked scmc_internal_g_xlen times per row.
 * Removed the unused ysingle/ymin/ymax locals of the generated code.
 * NOTE(review): xid is not forwarded to the kernel, so every inner
 * iteration passes identical arguments — confirm against the generator.
 * Always returns 0. */
int openmp_yee_abc_xyz_exec (openmp_yee_abc_xyz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads();
int tid = omp_get_thread_num();
int yid;
int xid;
for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
openmp_yee_abc_xyz_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->cur_rankx , ( kerstr )->cur_ranky , ( kerstr )->cur_rankz , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->damp_vars)[0] , ( ( kerstr )->deltat)[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0;
}
/* Parameter setters for the abc_xyz kernel: each binds one field of
 * the kernel argument struct to the device buffer (d_data) of the given
 * memory object. The generated originals reached the end of a non-void
 * function without returning (undefined behaviour if the caller uses
 * the result); each setter now returns 0. */
int openmp_yee_abc_xyz_scmc_set_parameter_outEB (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->outEB = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_inEB (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->inEB = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_cur_rankx (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_rankx = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_cur_ranky (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_ranky = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_cur_rankz (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_rankz = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_y_cpu_core (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->y_cpu_core = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_numvec (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->numvec = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_XLEN (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->XLEN = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_YLEN (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->YLEN = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_ZLEN (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ZLEN = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_ovlp (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ovlp = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_xblock (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->xblock = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_yblock (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->yblock = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_zblock (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->zblock = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_num_ele (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->num_ele = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_damp_vars (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->damp_vars = pm->d_data;
return 0;
}
int openmp_yee_abc_xyz_scmc_set_parameter_deltat (openmp_yee_abc_xyz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->deltat = pm->d_data;
return 0;
}
/* One-time initialisation hook for the setfix_yz kernel (no-op). */
int openmp_yee_setfix_yz_init (openmp_pscmc_env * pe ,openmp_yee_setfix_yz_struct * kerstr ){
return 0;
}
/* Reports the size of the kernel argument struct through *len. */
void openmp_yee_setfix_yz_get_struct_len (size_t * len ){
*len = sizeof(openmp_yee_setfix_yz_struct);
}
/* Compute units available to this backend = max OpenMP threads. */
int openmp_yee_setfix_yz_get_num_compute_units (openmp_yee_setfix_yz_struct * kerstr ){
return omp_get_max_threads();
}
/* Preferred x-dimension launch length for this backend. */
int openmp_yee_setfix_yz_get_xlen (){
return IDX_OPT_MAX;
}
/* Runs the setfix_yz kernel over a 2-D logical iteration space:
 * rows (yid) are distributed round-robin over the OpenMP threads and
 * the kernel is invoked scmc_internal_g_xlen times per row.
 * Removed the unused ysingle/ymin/ymax locals of the generated code.
 * NOTE(review): xid is not forwarded to the kernel, so every inner
 * iteration passes identical arguments — confirm against the generator.
 * Always returns 0. */
int openmp_yee_setfix_yz_exec (openmp_yee_setfix_yz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads();
int tid = omp_get_thread_num();
int yid;
int xid;
for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
openmp_yee_setfix_yz_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->cur_rankx , ( kerstr )->cur_ranky , ( kerstr )->cur_rankz , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->damp_vars)[0] , ( ( kerstr )->deltat)[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0;
}
/* Parameter setters for the setfix_yz kernel: each binds one field of
 * the kernel argument struct to the device buffer (d_data) of the given
 * memory object. The generated originals reached the end of a non-void
 * function without returning (undefined behaviour if the caller uses
 * the result); each setter now returns 0. */
int openmp_yee_setfix_yz_scmc_set_parameter_outEB (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->outEB = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_inEB (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->inEB = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_cur_rankx (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_rankx = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_cur_ranky (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_ranky = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_cur_rankz (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cur_rankz = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_y_cpu_core (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->y_cpu_core = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_numvec (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->numvec = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_XLEN (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->XLEN = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_YLEN (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->YLEN = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_ZLEN (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ZLEN = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_ovlp (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ovlp = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_xblock (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->xblock = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_yblock (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->yblock = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_zblock (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->zblock = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_num_ele (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->num_ele = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_damp_vars (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->damp_vars = pm->d_data;
return 0;
}
int openmp_yee_setfix_yz_scmc_set_parameter_deltat (openmp_yee_setfix_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->deltat = pm->d_data;
return 0;
}
/*
 * OpenMP backend glue for the "yee_damp_yz" PSCMC kernel: init/query
 * entry points, the parallel launcher, and one setter per kernel argument.
 */

/* One-time init; the OpenMP backend needs no per-kernel setup. */
int openmp_yee_damp_yz_init (openmp_pscmc_env * pe ,openmp_yee_damp_yz_struct * kerstr ){
    (void) pe; (void) kerstr;   /* intentionally unused on this backend */
    return 0;
}

/* Report sizeof the kernel-argument struct through *len. */
void openmp_yee_damp_yz_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_damp_yz_struct);
}

/* Compute units on this backend = max OpenMP threads (kerstr unused). */
int openmp_yee_damp_yz_get_num_compute_units (openmp_yee_damp_yz_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads ();
}

/* Preferred x index length (project-wide constant). */
int openmp_yee_damp_yz_get_xlen (){
    return IDX_OPT_MAX;
}

/*
 * Launch the kernel over the (xlen, ylen) index space: each thread takes
 * rows yid = tid, tid+numt, ... (round-robin), full x range per row.
 * Fixes vs. the generated original: loop counters are `long` to match the
 * `long` bounds (an `int` counter overflows -- UB -- for extents > INT_MAX),
 * and the dead ysingle/ymin/ymax locals are removed.
 * NOTE(review): xid is not forwarded to the kernel, so the inner loop
 * re-invokes it xlen times per row with identical arguments -- presumably
 * xlen == 1 or the kernel is idempotent; confirm against the generator.
 */
int openmp_yee_damp_yz_exec (openmp_yee_damp_yz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        for (long yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (long xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_yee_damp_yz_scmc_kernel (kerstr->outEB, kerstr->inEB,
                    kerstr->cur_rankx, kerstr->cur_ranky, kerstr->cur_rankz,
                    kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
                    kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
                    kerstr->num_ele[0], kerstr->damp_vars[0], kerstr->deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}

/*
 * Setters: bind one backend memory handle (pm->d_data) into the kernel
 * struct.  Each was declared `int` but had no return statement (UB if the
 * caller reads the value, C11 6.9.1p12) -- they now return 0 for success.
 */
int openmp_yee_damp_yz_scmc_set_parameter_outEB (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_inEB (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_cur_rankx (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_cur_ranky (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_cur_rankz (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_y_cpu_core (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_numvec (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_XLEN (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_YLEN (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_ZLEN (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_ovlp (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_xblock (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_yblock (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_zblock (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_num_ele (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_damp_vars (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_damp_yz_scmc_set_parameter_deltat (openmp_yee_damp_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data; return 0; }
/*
 * OpenMP backend glue for the "yee_pec_yz" PSCMC kernel: init/query
 * entry points, the parallel launcher, and one setter per kernel argument.
 */

/* One-time init; the OpenMP backend needs no per-kernel setup. */
int openmp_yee_pec_yz_init (openmp_pscmc_env * pe ,openmp_yee_pec_yz_struct * kerstr ){
    (void) pe; (void) kerstr;   /* intentionally unused on this backend */
    return 0;
}

/* Report sizeof the kernel-argument struct through *len. */
void openmp_yee_pec_yz_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_pec_yz_struct);
}

/* Compute units on this backend = max OpenMP threads (kerstr unused). */
int openmp_yee_pec_yz_get_num_compute_units (openmp_yee_pec_yz_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads ();
}

/* Preferred x index length (project-wide constant). */
int openmp_yee_pec_yz_get_xlen (){
    return IDX_OPT_MAX;
}

/*
 * Launch the kernel over the (xlen, ylen) index space: each thread takes
 * rows yid = tid, tid+numt, ... (round-robin), full x range per row.
 * Fixes vs. the generated original: loop counters are `long` to match the
 * `long` bounds (an `int` counter overflows -- UB -- for extents > INT_MAX),
 * and the dead ysingle/ymin/ymax locals are removed.
 * NOTE(review): xid is not forwarded to the kernel, so the inner loop
 * re-invokes it xlen times per row with identical arguments -- presumably
 * xlen == 1 or the kernel is idempotent; confirm against the generator.
 */
int openmp_yee_pec_yz_exec (openmp_yee_pec_yz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        for (long yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (long xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_yee_pec_yz_scmc_kernel (kerstr->outEB, kerstr->inEB,
                    kerstr->cur_rankx, kerstr->cur_ranky, kerstr->cur_rankz,
                    kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
                    kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
                    kerstr->num_ele[0], kerstr->damp_vars[0], kerstr->deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}

/*
 * Setters: bind one backend memory handle (pm->d_data) into the kernel
 * struct.  Each was declared `int` but had no return statement (UB if the
 * caller reads the value, C11 6.9.1p12) -- they now return 0 for success.
 */
int openmp_yee_pec_yz_scmc_set_parameter_outEB (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_inEB (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_cur_rankx (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_cur_ranky (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_cur_rankz (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_y_cpu_core (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_numvec (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_XLEN (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_YLEN (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_ZLEN (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_ovlp (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_xblock (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_yblock (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_zblock (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_num_ele (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_damp_vars (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_pec_yz_scmc_set_parameter_deltat (openmp_yee_pec_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data; return 0; }
/*
 * OpenMP backend glue for the "yee_abc_yz" PSCMC kernel: init/query
 * entry points, the parallel launcher, and one setter per kernel argument.
 */

/* One-time init; the OpenMP backend needs no per-kernel setup. */
int openmp_yee_abc_yz_init (openmp_pscmc_env * pe ,openmp_yee_abc_yz_struct * kerstr ){
    (void) pe; (void) kerstr;   /* intentionally unused on this backend */
    return 0;
}

/* Report sizeof the kernel-argument struct through *len. */
void openmp_yee_abc_yz_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_abc_yz_struct);
}

/* Compute units on this backend = max OpenMP threads (kerstr unused). */
int openmp_yee_abc_yz_get_num_compute_units (openmp_yee_abc_yz_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads ();
}

/* Preferred x index length (project-wide constant). */
int openmp_yee_abc_yz_get_xlen (){
    return IDX_OPT_MAX;
}

/*
 * Launch the kernel over the (xlen, ylen) index space: each thread takes
 * rows yid = tid, tid+numt, ... (round-robin), full x range per row.
 * Fixes vs. the generated original: loop counters are `long` to match the
 * `long` bounds (an `int` counter overflows -- UB -- for extents > INT_MAX),
 * and the dead ysingle/ymin/ymax locals are removed.
 * NOTE(review): xid is not forwarded to the kernel, so the inner loop
 * re-invokes it xlen times per row with identical arguments -- presumably
 * xlen == 1 or the kernel is idempotent; confirm against the generator.
 */
int openmp_yee_abc_yz_exec (openmp_yee_abc_yz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        for (long yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (long xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_yee_abc_yz_scmc_kernel (kerstr->outEB, kerstr->inEB,
                    kerstr->cur_rankx, kerstr->cur_ranky, kerstr->cur_rankz,
                    kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
                    kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
                    kerstr->num_ele[0], kerstr->damp_vars[0], kerstr->deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}

/*
 * Setters: bind one backend memory handle (pm->d_data) into the kernel
 * struct.  Each was declared `int` but had no return statement (UB if the
 * caller reads the value, C11 6.9.1p12) -- they now return 0 for success.
 */
int openmp_yee_abc_yz_scmc_set_parameter_outEB (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_inEB (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_cur_rankx (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_cur_ranky (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_cur_rankz (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_y_cpu_core (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_numvec (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_XLEN (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_YLEN (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_ZLEN (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_ovlp (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_xblock (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_yblock (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_zblock (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_num_ele (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_damp_vars (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_abc_yz_scmc_set_parameter_deltat (openmp_yee_abc_yz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data; return 0; }
/*
 * OpenMP backend glue for the "yee_setfix_xz" PSCMC kernel: init/query
 * entry points, the parallel launcher, and one setter per kernel argument.
 */

/* One-time init; the OpenMP backend needs no per-kernel setup. */
int openmp_yee_setfix_xz_init (openmp_pscmc_env * pe ,openmp_yee_setfix_xz_struct * kerstr ){
    (void) pe; (void) kerstr;   /* intentionally unused on this backend */
    return 0;
}

/* Report sizeof the kernel-argument struct through *len. */
void openmp_yee_setfix_xz_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_setfix_xz_struct);
}

/* Compute units on this backend = max OpenMP threads (kerstr unused). */
int openmp_yee_setfix_xz_get_num_compute_units (openmp_yee_setfix_xz_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads ();
}

/* Preferred x index length (project-wide constant). */
int openmp_yee_setfix_xz_get_xlen (){
    return IDX_OPT_MAX;
}

/*
 * Launch the kernel over the (xlen, ylen) index space: each thread takes
 * rows yid = tid, tid+numt, ... (round-robin), full x range per row.
 * Fixes vs. the generated original: loop counters are `long` to match the
 * `long` bounds (an `int` counter overflows -- UB -- for extents > INT_MAX),
 * and the dead ysingle/ymin/ymax locals are removed.
 * NOTE(review): xid is not forwarded to the kernel, so the inner loop
 * re-invokes it xlen times per row with identical arguments -- presumably
 * xlen == 1 or the kernel is idempotent; confirm against the generator.
 */
int openmp_yee_setfix_xz_exec (openmp_yee_setfix_xz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        for (long yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (long xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_yee_setfix_xz_scmc_kernel (kerstr->outEB, kerstr->inEB,
                    kerstr->cur_rankx, kerstr->cur_ranky, kerstr->cur_rankz,
                    kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
                    kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
                    kerstr->num_ele[0], kerstr->damp_vars[0], kerstr->deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}

/*
 * Setters: bind one backend memory handle (pm->d_data) into the kernel
 * struct.  Each was declared `int` but had no return statement (UB if the
 * caller reads the value, C11 6.9.1p12) -- they now return 0 for success.
 */
int openmp_yee_setfix_xz_scmc_set_parameter_outEB (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_inEB (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_cur_rankx (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_cur_ranky (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_cur_rankz (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_y_cpu_core (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_numvec (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_XLEN (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_YLEN (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_ZLEN (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_ovlp (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_xblock (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_yblock (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_zblock (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_num_ele (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_damp_vars (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_setfix_xz_scmc_set_parameter_deltat (openmp_yee_setfix_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data; return 0; }
/*
 * OpenMP backend glue for the "yee_damp_xz" PSCMC kernel: init/query
 * entry points, the parallel launcher, and one setter per kernel argument.
 */

/* One-time init; the OpenMP backend needs no per-kernel setup. */
int openmp_yee_damp_xz_init (openmp_pscmc_env * pe ,openmp_yee_damp_xz_struct * kerstr ){
    (void) pe; (void) kerstr;   /* intentionally unused on this backend */
    return 0;
}

/* Report sizeof the kernel-argument struct through *len. */
void openmp_yee_damp_xz_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_damp_xz_struct);
}

/* Compute units on this backend = max OpenMP threads (kerstr unused). */
int openmp_yee_damp_xz_get_num_compute_units (openmp_yee_damp_xz_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads ();
}

/* Preferred x index length (project-wide constant). */
int openmp_yee_damp_xz_get_xlen (){
    return IDX_OPT_MAX;
}

/*
 * Launch the kernel over the (xlen, ylen) index space: each thread takes
 * rows yid = tid, tid+numt, ... (round-robin), full x range per row.
 * Fixes vs. the generated original: loop counters are `long` to match the
 * `long` bounds (an `int` counter overflows -- UB -- for extents > INT_MAX),
 * and the dead ysingle/ymin/ymax locals are removed.
 * NOTE(review): xid is not forwarded to the kernel, so the inner loop
 * re-invokes it xlen times per row with identical arguments -- presumably
 * xlen == 1 or the kernel is idempotent; confirm against the generator.
 */
int openmp_yee_damp_xz_exec (openmp_yee_damp_xz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        for (long yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (long xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_yee_damp_xz_scmc_kernel (kerstr->outEB, kerstr->inEB,
                    kerstr->cur_rankx, kerstr->cur_ranky, kerstr->cur_rankz,
                    kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
                    kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
                    kerstr->num_ele[0], kerstr->damp_vars[0], kerstr->deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}

/*
 * Setters: bind one backend memory handle (pm->d_data) into the kernel
 * struct.  Each was declared `int` but had no return statement (UB if the
 * caller reads the value, C11 6.9.1p12) -- they now return 0 for success.
 */
int openmp_yee_damp_xz_scmc_set_parameter_outEB (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_inEB (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_cur_rankx (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_cur_ranky (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_cur_rankz (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_y_cpu_core (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_numvec (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_XLEN (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_YLEN (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_ZLEN (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_ovlp (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_xblock (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_yblock (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_zblock (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_num_ele (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_damp_vars (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_damp_xz_scmc_set_parameter_deltat (openmp_yee_damp_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data; return 0; }
/*
 * OpenMP backend glue for the "yee_pec_xz" PSCMC kernel: init/query
 * entry points, the parallel launcher, and one setter per kernel argument.
 */

/* One-time init; the OpenMP backend needs no per-kernel setup. */
int openmp_yee_pec_xz_init (openmp_pscmc_env * pe ,openmp_yee_pec_xz_struct * kerstr ){
    (void) pe; (void) kerstr;   /* intentionally unused on this backend */
    return 0;
}

/* Report sizeof the kernel-argument struct through *len. */
void openmp_yee_pec_xz_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_pec_xz_struct);
}

/* Compute units on this backend = max OpenMP threads (kerstr unused). */
int openmp_yee_pec_xz_get_num_compute_units (openmp_yee_pec_xz_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads ();
}

/* Preferred x index length (project-wide constant). */
int openmp_yee_pec_xz_get_xlen (){
    return IDX_OPT_MAX;
}

/*
 * Launch the kernel over the (xlen, ylen) index space: each thread takes
 * rows yid = tid, tid+numt, ... (round-robin), full x range per row.
 * Fixes vs. the generated original: loop counters are `long` to match the
 * `long` bounds (an `int` counter overflows -- UB -- for extents > INT_MAX),
 * and the dead ysingle/ymin/ymax locals are removed.
 * NOTE(review): xid is not forwarded to the kernel, so the inner loop
 * re-invokes it xlen times per row with identical arguments -- presumably
 * xlen == 1 or the kernel is idempotent; confirm against the generator.
 */
int openmp_yee_pec_xz_exec (openmp_yee_pec_xz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        for (long yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (long xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_yee_pec_xz_scmc_kernel (kerstr->outEB, kerstr->inEB,
                    kerstr->cur_rankx, kerstr->cur_ranky, kerstr->cur_rankz,
                    kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
                    kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
                    kerstr->num_ele[0], kerstr->damp_vars[0], kerstr->deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}

/*
 * Setters: bind one backend memory handle (pm->d_data) into the kernel
 * struct.  Each was declared `int` but had no return statement (UB if the
 * caller reads the value, C11 6.9.1p12) -- they now return 0 for success.
 */
int openmp_yee_pec_xz_scmc_set_parameter_outEB (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_inEB (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_cur_rankx (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_cur_ranky (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_cur_rankz (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_y_cpu_core (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_numvec (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_XLEN (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_YLEN (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_ZLEN (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_ovlp (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_xblock (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_yblock (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_zblock (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_num_ele (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_damp_vars (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_pec_xz_scmc_set_parameter_deltat (openmp_yee_pec_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data; return 0; }
/*
 * OpenMP backend glue for the "yee_abc_xz" PSCMC kernel: init/query
 * entry points, the parallel launcher, and one setter per kernel argument.
 */

/* One-time init; the OpenMP backend needs no per-kernel setup. */
int openmp_yee_abc_xz_init (openmp_pscmc_env * pe ,openmp_yee_abc_xz_struct * kerstr ){
    (void) pe; (void) kerstr;   /* intentionally unused on this backend */
    return 0;
}

/* Report sizeof the kernel-argument struct through *len. */
void openmp_yee_abc_xz_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_abc_xz_struct);
}

/* Compute units on this backend = max OpenMP threads (kerstr unused). */
int openmp_yee_abc_xz_get_num_compute_units (openmp_yee_abc_xz_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads ();
}

/* Preferred x index length (project-wide constant). */
int openmp_yee_abc_xz_get_xlen (){
    return IDX_OPT_MAX;
}

/*
 * Launch the kernel over the (xlen, ylen) index space: each thread takes
 * rows yid = tid, tid+numt, ... (round-robin), full x range per row.
 * Fixes vs. the generated original: loop counters are `long` to match the
 * `long` bounds (an `int` counter overflows -- UB -- for extents > INT_MAX),
 * and the dead ysingle/ymin/ymax locals are removed.
 * NOTE(review): xid is not forwarded to the kernel, so the inner loop
 * re-invokes it xlen times per row with identical arguments -- presumably
 * xlen == 1 or the kernel is idempotent; confirm against the generator.
 */
int openmp_yee_abc_xz_exec (openmp_yee_abc_xz_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        for (long yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (long xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_yee_abc_xz_scmc_kernel (kerstr->outEB, kerstr->inEB,
                    kerstr->cur_rankx, kerstr->cur_ranky, kerstr->cur_rankz,
                    kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
                    kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
                    kerstr->num_ele[0], kerstr->damp_vars[0], kerstr->deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}

/*
 * Setters: bind one backend memory handle (pm->d_data) into the kernel
 * struct.  Each was declared `int` but had no return statement (UB if the
 * caller reads the value, C11 6.9.1p12) -- they now return 0 for success.
 */
int openmp_yee_abc_xz_scmc_set_parameter_outEB (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_inEB (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_cur_rankx (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_cur_ranky (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_cur_rankz (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_y_cpu_core (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_numvec (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_XLEN (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_YLEN (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_ZLEN (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_ovlp (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_xblock (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_yblock (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_zblock (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_num_ele (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_damp_vars (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_abc_xz_scmc_set_parameter_deltat (openmp_yee_abc_xz_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data; return 0; }
/*
 * OpenMP backend glue for the "yee_setfix_xy" PSCMC kernel: init/query
 * entry points, the parallel launcher, and one setter per kernel argument.
 */

/* One-time init; the OpenMP backend needs no per-kernel setup. */
int openmp_yee_setfix_xy_init (openmp_pscmc_env * pe ,openmp_yee_setfix_xy_struct * kerstr ){
    (void) pe; (void) kerstr;   /* intentionally unused on this backend */
    return 0;
}

/* Report sizeof the kernel-argument struct through *len. */
void openmp_yee_setfix_xy_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_setfix_xy_struct);
}

/* Compute units on this backend = max OpenMP threads (kerstr unused). */
int openmp_yee_setfix_xy_get_num_compute_units (openmp_yee_setfix_xy_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads ();
}

/* Preferred x index length (project-wide constant). */
int openmp_yee_setfix_xy_get_xlen (){
    return IDX_OPT_MAX;
}

/*
 * Launch the kernel over the (xlen, ylen) index space: each thread takes
 * rows yid = tid, tid+numt, ... (round-robin), full x range per row.
 * Fixes vs. the generated original: loop counters are `long` to match the
 * `long` bounds (an `int` counter overflows -- UB -- for extents > INT_MAX),
 * and the dead ysingle/ymin/ymax locals are removed.
 * NOTE(review): xid is not forwarded to the kernel, so the inner loop
 * re-invokes it xlen times per row with identical arguments -- presumably
 * xlen == 1 or the kernel is idempotent; confirm against the generator.
 */
int openmp_yee_setfix_xy_exec (openmp_yee_setfix_xy_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        for (long yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (long xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_yee_setfix_xy_scmc_kernel (kerstr->outEB, kerstr->inEB,
                    kerstr->cur_rankx, kerstr->cur_ranky, kerstr->cur_rankz,
                    kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
                    kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
                    kerstr->num_ele[0], kerstr->damp_vars[0], kerstr->deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}

/*
 * Setters: bind one backend memory handle (pm->d_data) into the kernel
 * struct.  Each was declared `int` but had no return statement (UB if the
 * caller reads the value, C11 6.9.1p12) -- they now return 0 for success.
 */
int openmp_yee_setfix_xy_scmc_set_parameter_outEB (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_inEB (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_cur_rankx (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_cur_ranky (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_cur_rankz (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_y_cpu_core (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_numvec (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_XLEN (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_YLEN (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_ZLEN (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_ovlp (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_xblock (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_yblock (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_zblock (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_num_ele (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_damp_vars (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_setfix_xy_scmc_set_parameter_deltat (openmp_yee_setfix_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data; return 0; }
/* One-time initialization hook for the damp_xy kernel; nothing to set up. */
int openmp_yee_damp_xy_init (openmp_pscmc_env * pe ,openmp_yee_damp_xy_struct * kerstr ){
    (void) pe;     /* unused */
    (void) kerstr; /* unused */
    return 0;
}
/* Store sizeof the kernel-argument struct in *len. */
void openmp_yee_damp_xy_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_damp_xy_struct);
}
/* Number of compute units for the damp_xy kernel = max OpenMP threads. */
int openmp_yee_damp_xy_get_num_compute_units (openmp_yee_damp_xy_struct * kerstr ){
    (void) kerstr; /* unused: the thread count is independent of the kernel */
    return omp_get_max_threads ();
}
/* Preferred x work-size granularity for this backend. */
int openmp_yee_damp_xy_get_xlen (void){
    return IDX_OPT_MAX;
}
/*
 * Run the damp_xy kernel over the global (x, y) index space.
 * y indices are dealt round-robin to the OpenMP threads (thread t takes
 * y = t, t+numt, t+2*numt, ...); each thread sweeps the full x range.
 * NOTE(review): xid is not forwarded to the kernel, so the kernel is
 * invoked with identical arguments for every x of a given y -- presumably
 * scmc_internal_g_xlen is 1 for this kernel; confirm against the generator.
 * Always returns 0.
 */
int openmp_yee_damp_xy_exec (openmp_yee_damp_xy_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        /* generator's ysingle/ymin/ymax were never used and are removed */
        for (int yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
        {
            for (int xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
            {
                openmp_yee_damp_xy_scmc_kernel ( kerstr->outEB , kerstr->inEB ,
                    kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz ,
                    kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                    kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                    kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] ,
                    kerstr->zblock[0] , kerstr->num_ele[0] ,
                    kerstr->damp_vars[0] , kerstr->deltat[0] ,
                    yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter setters for the damp_xy kernel: each binds one field of the
 * kernel-argument struct to the device buffer owned by `pm`.
 * The generator declared these `int` but omitted the return statement
 * (undefined behavior if the caller reads the result); all now return 0.
 */
int openmp_yee_damp_xy_scmc_set_parameter_outEB (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_inEB (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_cur_rankx (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_cur_ranky (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_cur_rankz (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_y_cpu_core (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_numvec (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_XLEN (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_YLEN (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_ZLEN (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_ovlp (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_xblock (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_yblock (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_zblock (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_num_ele (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_damp_vars (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data;
    return 0;
}
int openmp_yee_damp_xy_scmc_set_parameter_deltat (openmp_yee_damp_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data;
    return 0;
}
/* One-time initialization hook for the pec_xy kernel; nothing to set up. */
int openmp_yee_pec_xy_init (openmp_pscmc_env * pe ,openmp_yee_pec_xy_struct * kerstr ){
    (void) pe;     /* unused */
    (void) kerstr; /* unused */
    return 0;
}
/* Store sizeof the kernel-argument struct in *len. */
void openmp_yee_pec_xy_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_pec_xy_struct);
}
/* Number of compute units for the pec_xy kernel = max OpenMP threads. */
int openmp_yee_pec_xy_get_num_compute_units (openmp_yee_pec_xy_struct * kerstr ){
    (void) kerstr; /* unused: the thread count is independent of the kernel */
    return omp_get_max_threads ();
}
/* Preferred x work-size granularity for this backend. */
int openmp_yee_pec_xy_get_xlen (void){
    return IDX_OPT_MAX;
}
/*
 * Run the pec_xy kernel over the global (x, y) index space.
 * y indices are dealt round-robin to the OpenMP threads (thread t takes
 * y = t, t+numt, t+2*numt, ...); each thread sweeps the full x range.
 * NOTE(review): xid is not forwarded to the kernel, so the kernel is
 * invoked with identical arguments for every x of a given y -- presumably
 * scmc_internal_g_xlen is 1 for this kernel; confirm against the generator.
 * Always returns 0.
 */
int openmp_yee_pec_xy_exec (openmp_yee_pec_xy_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        /* generator's ysingle/ymin/ymax were never used and are removed */
        for (int yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
        {
            for (int xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
            {
                openmp_yee_pec_xy_scmc_kernel ( kerstr->outEB , kerstr->inEB ,
                    kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz ,
                    kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                    kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                    kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] ,
                    kerstr->zblock[0] , kerstr->num_ele[0] ,
                    kerstr->damp_vars[0] , kerstr->deltat[0] ,
                    yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter setters for the pec_xy kernel: each binds one field of the
 * kernel-argument struct to the device buffer owned by `pm`.
 * The generator declared these `int` but omitted the return statement
 * (undefined behavior if the caller reads the result); all now return 0.
 */
int openmp_yee_pec_xy_scmc_set_parameter_outEB (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_inEB (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_cur_rankx (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_cur_ranky (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_cur_rankz (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_y_cpu_core (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_numvec (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_XLEN (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_YLEN (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_ZLEN (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_ovlp (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_xblock (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_yblock (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_zblock (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_num_ele (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_damp_vars (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data;
    return 0;
}
int openmp_yee_pec_xy_scmc_set_parameter_deltat (openmp_yee_pec_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data;
    return 0;
}
/* One-time initialization hook for the abc_xy kernel; nothing to set up. */
int openmp_yee_abc_xy_init (openmp_pscmc_env * pe ,openmp_yee_abc_xy_struct * kerstr ){
    (void) pe;     /* unused */
    (void) kerstr; /* unused */
    return 0;
}
/* Store sizeof the kernel-argument struct in *len. */
void openmp_yee_abc_xy_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_abc_xy_struct);
}
/* Number of compute units for the abc_xy kernel = max OpenMP threads. */
int openmp_yee_abc_xy_get_num_compute_units (openmp_yee_abc_xy_struct * kerstr ){
    (void) kerstr; /* unused: the thread count is independent of the kernel */
    return omp_get_max_threads ();
}
/* Preferred x work-size granularity for this backend. */
int openmp_yee_abc_xy_get_xlen (void){
    return IDX_OPT_MAX;
}
/*
 * Run the abc_xy kernel over the global (x, y) index space.
 * y indices are dealt round-robin to the OpenMP threads (thread t takes
 * y = t, t+numt, t+2*numt, ...); each thread sweeps the full x range.
 * NOTE(review): xid is not forwarded to the kernel, so the kernel is
 * invoked with identical arguments for every x of a given y -- presumably
 * scmc_internal_g_xlen is 1 for this kernel; confirm against the generator.
 * Always returns 0.
 */
int openmp_yee_abc_xy_exec (openmp_yee_abc_xy_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        /* generator's ysingle/ymin/ymax were never used and are removed */
        for (int yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
        {
            for (int xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
            {
                openmp_yee_abc_xy_scmc_kernel ( kerstr->outEB , kerstr->inEB ,
                    kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz ,
                    kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                    kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                    kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] ,
                    kerstr->zblock[0] , kerstr->num_ele[0] ,
                    kerstr->damp_vars[0] , kerstr->deltat[0] ,
                    yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter setters for the abc_xy kernel: each binds one field of the
 * kernel-argument struct to the device buffer owned by `pm`.
 * The generator declared these `int` but omitted the return statement
 * (undefined behavior if the caller reads the result); all now return 0.
 */
int openmp_yee_abc_xy_scmc_set_parameter_outEB (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_inEB (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_cur_rankx (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_cur_ranky (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_cur_rankz (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_y_cpu_core (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_numvec (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_XLEN (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_YLEN (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_ZLEN (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_ovlp (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_xblock (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_yblock (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_zblock (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_num_ele (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_damp_vars (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data;
    return 0;
}
int openmp_yee_abc_xy_scmc_set_parameter_deltat (openmp_yee_abc_xy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data;
    return 0;
}
/* One-time initialization hook for the setfix_z kernel; nothing to set up. */
int openmp_yee_setfix_z_init (openmp_pscmc_env * pe ,openmp_yee_setfix_z_struct * kerstr ){
    (void) pe;     /* unused */
    (void) kerstr; /* unused */
    return 0;
}
/* Store sizeof the kernel-argument struct in *len. */
void openmp_yee_setfix_z_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_setfix_z_struct);
}
/* Number of compute units for the setfix_z kernel = max OpenMP threads. */
int openmp_yee_setfix_z_get_num_compute_units (openmp_yee_setfix_z_struct * kerstr ){
    (void) kerstr; /* unused: the thread count is independent of the kernel */
    return omp_get_max_threads ();
}
/* Preferred x work-size granularity for this backend. */
int openmp_yee_setfix_z_get_xlen (void){
    return IDX_OPT_MAX;
}
/*
 * Run the setfix_z kernel over the global (x, y) index space.
 * y indices are dealt round-robin to the OpenMP threads (thread t takes
 * y = t, t+numt, t+2*numt, ...); each thread sweeps the full x range.
 * NOTE(review): xid is not forwarded to the kernel, so the kernel is
 * invoked with identical arguments for every x of a given y -- presumably
 * scmc_internal_g_xlen is 1 for this kernel; confirm against the generator.
 * Always returns 0.
 */
int openmp_yee_setfix_z_exec (openmp_yee_setfix_z_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        /* generator's ysingle/ymin/ymax were never used and are removed */
        for (int yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
        {
            for (int xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
            {
                openmp_yee_setfix_z_scmc_kernel ( kerstr->outEB , kerstr->inEB ,
                    kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz ,
                    kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                    kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                    kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] ,
                    kerstr->zblock[0] , kerstr->num_ele[0] ,
                    kerstr->damp_vars[0] , kerstr->deltat[0] ,
                    yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter setters for the setfix_z kernel: each binds one field of the
 * kernel-argument struct to the device buffer owned by `pm`.
 * The generator declared these `int` but omitted the return statement
 * (undefined behavior if the caller reads the result); all now return 0.
 */
int openmp_yee_setfix_z_scmc_set_parameter_outEB (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_inEB (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_cur_rankx (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_cur_ranky (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_cur_rankz (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_y_cpu_core (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_numvec (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_XLEN (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_YLEN (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_ZLEN (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_ovlp (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_xblock (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_yblock (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_zblock (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_num_ele (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_damp_vars (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data;
    return 0;
}
int openmp_yee_setfix_z_scmc_set_parameter_deltat (openmp_yee_setfix_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data;
    return 0;
}
/* One-time initialization hook for the damp_z kernel; nothing to set up. */
int openmp_yee_damp_z_init (openmp_pscmc_env * pe ,openmp_yee_damp_z_struct * kerstr ){
    (void) pe;     /* unused */
    (void) kerstr; /* unused */
    return 0;
}
/* Store sizeof the kernel-argument struct in *len. */
void openmp_yee_damp_z_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_damp_z_struct);
}
/* Number of compute units for the damp_z kernel = max OpenMP threads. */
int openmp_yee_damp_z_get_num_compute_units (openmp_yee_damp_z_struct * kerstr ){
    (void) kerstr; /* unused: the thread count is independent of the kernel */
    return omp_get_max_threads ();
}
/* Preferred x work-size granularity for this backend. */
int openmp_yee_damp_z_get_xlen (void){
    return IDX_OPT_MAX;
}
/*
 * Run the damp_z kernel over the global (x, y) index space.
 * y indices are dealt round-robin to the OpenMP threads (thread t takes
 * y = t, t+numt, t+2*numt, ...); each thread sweeps the full x range.
 * NOTE(review): xid is not forwarded to the kernel, so the kernel is
 * invoked with identical arguments for every x of a given y -- presumably
 * scmc_internal_g_xlen is 1 for this kernel; confirm against the generator.
 * Always returns 0.
 */
int openmp_yee_damp_z_exec (openmp_yee_damp_z_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        /* generator's ysingle/ymin/ymax were never used and are removed */
        for (int yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
        {
            for (int xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
            {
                openmp_yee_damp_z_scmc_kernel ( kerstr->outEB , kerstr->inEB ,
                    kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz ,
                    kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                    kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                    kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] ,
                    kerstr->zblock[0] , kerstr->num_ele[0] ,
                    kerstr->damp_vars[0] , kerstr->deltat[0] ,
                    yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter setters for the damp_z kernel: each binds one field of the
 * kernel-argument struct to the device buffer owned by `pm`.
 * The generator declared these `int` but omitted the return statement
 * (undefined behavior if the caller reads the result); all now return 0.
 */
int openmp_yee_damp_z_scmc_set_parameter_outEB (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_inEB (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_cur_rankx (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_cur_ranky (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_cur_rankz (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_y_cpu_core (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_numvec (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_XLEN (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_YLEN (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_ZLEN (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_ovlp (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_xblock (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_yblock (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_zblock (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_num_ele (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_damp_vars (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data;
    return 0;
}
int openmp_yee_damp_z_scmc_set_parameter_deltat (openmp_yee_damp_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data;
    return 0;
}
/* One-time initialization hook for the pec_z kernel; nothing to set up. */
int openmp_yee_pec_z_init (openmp_pscmc_env * pe ,openmp_yee_pec_z_struct * kerstr ){
    (void) pe;     /* unused */
    (void) kerstr; /* unused */
    return 0;
}
/* Store sizeof the kernel-argument struct in *len. */
void openmp_yee_pec_z_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_pec_z_struct);
}
/* Number of compute units for the pec_z kernel = max OpenMP threads. */
int openmp_yee_pec_z_get_num_compute_units (openmp_yee_pec_z_struct * kerstr ){
    (void) kerstr; /* unused: the thread count is independent of the kernel */
    return omp_get_max_threads ();
}
/* Preferred x work-size granularity for this backend. */
int openmp_yee_pec_z_get_xlen (void){
    return IDX_OPT_MAX;
}
/*
 * Run the pec_z kernel over the global (x, y) index space.
 * y indices are dealt round-robin to the OpenMP threads (thread t takes
 * y = t, t+numt, t+2*numt, ...); each thread sweeps the full x range.
 * NOTE(review): xid is not forwarded to the kernel, so the kernel is
 * invoked with identical arguments for every x of a given y -- presumably
 * scmc_internal_g_xlen is 1 for this kernel; confirm against the generator.
 * Always returns 0.
 */
int openmp_yee_pec_z_exec (openmp_yee_pec_z_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        /* generator's ysingle/ymin/ymax were never used and are removed */
        for (int yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
        {
            for (int xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
            {
                openmp_yee_pec_z_scmc_kernel ( kerstr->outEB , kerstr->inEB ,
                    kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz ,
                    kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                    kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                    kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] ,
                    kerstr->zblock[0] , kerstr->num_ele[0] ,
                    kerstr->damp_vars[0] , kerstr->deltat[0] ,
                    yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter setters for the pec_z kernel: each binds one field of the
 * kernel-argument struct to the device buffer owned by `pm`.
 * The generator declared these `int` but omitted the return statement
 * (undefined behavior if the caller reads the result); all now return 0.
 */
int openmp_yee_pec_z_scmc_set_parameter_outEB (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_inEB (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_cur_rankx (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_cur_ranky (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_cur_rankz (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_y_cpu_core (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_numvec (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_XLEN (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_YLEN (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_ZLEN (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_ovlp (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_xblock (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_yblock (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_zblock (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_num_ele (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_damp_vars (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data;
    return 0;
}
int openmp_yee_pec_z_scmc_set_parameter_deltat (openmp_yee_pec_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data;
    return 0;
}
/* One-time initialization hook for the abc_z kernel; nothing to set up. */
int openmp_yee_abc_z_init (openmp_pscmc_env * pe ,openmp_yee_abc_z_struct * kerstr ){
    (void) pe;     /* unused */
    (void) kerstr; /* unused */
    return 0;
}
/* Store sizeof the kernel-argument struct in *len. */
void openmp_yee_abc_z_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_abc_z_struct);
}
/* Number of compute units for the abc_z kernel = max OpenMP threads. */
int openmp_yee_abc_z_get_num_compute_units (openmp_yee_abc_z_struct * kerstr ){
    (void) kerstr; /* unused: the thread count is independent of the kernel */
    return omp_get_max_threads ();
}
/* Preferred x work-size granularity for this backend. */
int openmp_yee_abc_z_get_xlen (void){
    return IDX_OPT_MAX;
}
/*
 * Run the abc_z kernel over the global (x, y) index space.
 * y indices are dealt round-robin to the OpenMP threads (thread t takes
 * y = t, t+numt, t+2*numt, ...); each thread sweeps the full x range.
 * NOTE(review): xid is not forwarded to the kernel, so the kernel is
 * invoked with identical arguments for every x of a given y -- presumably
 * scmc_internal_g_xlen is 1 for this kernel; confirm against the generator.
 * Always returns 0.
 */
int openmp_yee_abc_z_exec (openmp_yee_abc_z_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        /* generator's ysingle/ymin/ymax were never used and are removed */
        for (int yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
        {
            for (int xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
            {
                openmp_yee_abc_z_scmc_kernel ( kerstr->outEB , kerstr->inEB ,
                    kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz ,
                    kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                    kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                    kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] ,
                    kerstr->zblock[0] , kerstr->num_ele[0] ,
                    kerstr->damp_vars[0] , kerstr->deltat[0] ,
                    yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter setters for the abc_z kernel: each binds one field of the
 * kernel-argument struct to the device buffer owned by `pm`.
 * The generator declared these `int` but omitted the return statement
 * (undefined behavior if the caller reads the result); all now return 0.
 */
int openmp_yee_abc_z_scmc_set_parameter_outEB (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_inEB (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_cur_rankx (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_cur_ranky (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_cur_rankz (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_y_cpu_core (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_numvec (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_XLEN (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_YLEN (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_ZLEN (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_ovlp (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_xblock (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_yblock (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_zblock (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_num_ele (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_damp_vars (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data;
    return 0;
}
int openmp_yee_abc_z_scmc_set_parameter_deltat (openmp_yee_abc_z_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data;
    return 0;
}
/* One-time initialization hook for the setfix_y kernel; nothing to set up. */
int openmp_yee_setfix_y_init (openmp_pscmc_env * pe ,openmp_yee_setfix_y_struct * kerstr ){
    (void) pe;     /* unused */
    (void) kerstr; /* unused */
    return 0;
}
/* Store sizeof the kernel-argument struct in *len. */
void openmp_yee_setfix_y_get_struct_len (size_t * len ){
    len[0] = sizeof (openmp_yee_setfix_y_struct);
}
/* Number of compute units for the setfix_y kernel = max OpenMP threads. */
int openmp_yee_setfix_y_get_num_compute_units (openmp_yee_setfix_y_struct * kerstr ){
    (void) kerstr; /* unused: the thread count is independent of the kernel */
    return omp_get_max_threads ();
}
/* Preferred x work-size granularity for this backend. */
int openmp_yee_setfix_y_get_xlen (void){
    return IDX_OPT_MAX;
}
/*
 * Run the setfix_y kernel over the global (x, y) index space.
 * y indices are dealt round-robin to the OpenMP threads (thread t takes
 * y = t, t+numt, t+2*numt, ...); each thread sweeps the full x range.
 * NOTE(review): xid is not forwarded to the kernel, so the kernel is
 * invoked with identical arguments for every x of a given y -- presumably
 * scmc_internal_g_xlen is 1 for this kernel; confirm against the generator.
 * Always returns 0.
 */
int openmp_yee_setfix_y_exec (openmp_yee_setfix_y_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads ();
        int tid = omp_get_thread_num ();
        /* generator's ysingle/ymin/ymax were never used and are removed */
        for (int yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
        {
            for (int xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
            {
                openmp_yee_setfix_y_scmc_kernel ( kerstr->outEB , kerstr->inEB ,
                    kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz ,
                    kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                    kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                    kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] ,
                    kerstr->zblock[0] , kerstr->num_ele[0] ,
                    kerstr->damp_vars[0] , kerstr->deltat[0] ,
                    yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter setters for the setfix_y kernel: each binds one field of the
 * kernel-argument struct to the device buffer owned by `pm`.
 * The generator declared these `int` but omitted the return statement
 * (undefined behavior if the caller reads the result); all now return 0.
 */
int openmp_yee_setfix_y_scmc_set_parameter_outEB (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->outEB = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_inEB (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inEB = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_cur_rankx (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankx = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_cur_ranky (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_ranky = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_cur_rankz (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cur_rankz = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_y_cpu_core (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_numvec (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_XLEN (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_YLEN (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_ZLEN (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_ovlp (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_xblock (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_yblock (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_zblock (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_num_ele (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_damp_vars (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->damp_vars = pm->d_data;
    return 0;
}
int openmp_yee_setfix_y_scmc_set_parameter_deltat (openmp_yee_setfix_y_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->deltat = pm->d_data;
    return 0;
}
int openmp_yee_damp_y_init (openmp_pscmc_env * pe ,openmp_yee_damp_y_struct * kerstr ){
    /* No per-kernel initialization is required for this backend. */
    return 0;
}
void openmp_yee_damp_y_get_struct_len (size_t * len ){
    /* Report the size of this kernel's parameter struct. */
    *len = sizeof(openmp_yee_damp_y_struct);
}
int openmp_yee_damp_y_get_num_compute_units (openmp_yee_damp_y_struct * kerstr ){
    /* One compute unit per available OpenMP thread. */
    return omp_get_max_threads();
}
int openmp_yee_damp_y_get_xlen (){
    /* Preferred x-dimension extent for this backend. */
    return IDX_OPT_MAX;
}
/*
 * Drive the damp_y kernel over the full (x, y) index space.
 * The y range is split across OpenMP threads cyclically: thread t
 * processes rows t, t + numt, t + 2*numt, ...
 * Fixes: removed the unused ysingle/ymin/ymax block-partition locals
 * (dead code from a different scheduling scheme) and made the loop
 * counters long so they cannot overflow against the long bounds.
 * NOTE(review): xid is never passed to the kernel, so the inner loop
 * repeats the identical call scmc_internal_g_xlen times -- presumably
 * the generator always emits g_xlen == 1 here; confirm before changing.
 */
int openmp_yee_damp_y_exec (openmp_yee_damp_y_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ( long yid = tid ; yid < scmc_internal_g_ylen ; yid += numt )
{
for ( long xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1 )
{
openmp_yee_damp_y_scmc_kernel ( kerstr->outEB , kerstr->inEB , kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz , kerstr->y_cpu_core[0] , kerstr->numvec[0] , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] , kerstr->num_ele[0] , kerstr->damp_vars[0] , kerstr->deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/*
 * Parameter-binding setters for the damp_y kernel: each one points a
 * field of the kernel struct at the buffer held by an openmp_pscmc_mem
 * handle.  Fix: the generated bodies had no return statement; reading
 * the result of such a call is undefined behavior (C11 6.9.1p12), so
 * every setter now explicitly returns 0 (success).
 */
int openmp_yee_damp_y_scmc_set_parameter_outEB (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_inEB (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_cur_rankx (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_cur_ranky (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_cur_rankz (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_y_cpu_core (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_numvec (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_XLEN (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_YLEN (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_ZLEN (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_ovlp (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_xblock (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_yblock (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_zblock (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_num_ele (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_damp_vars (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_damp_y_scmc_set_parameter_deltat (openmp_yee_damp_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->deltat = pm->d_data; return 0; }
int openmp_yee_pec_y_init (openmp_pscmc_env * pe ,openmp_yee_pec_y_struct * kerstr ){
    /* No per-kernel initialization is required for this backend. */
    return 0;
}
void openmp_yee_pec_y_get_struct_len (size_t * len ){
    /* Report the size of this kernel's parameter struct. */
    *len = sizeof(openmp_yee_pec_y_struct);
}
int openmp_yee_pec_y_get_num_compute_units (openmp_yee_pec_y_struct * kerstr ){
    /* One compute unit per available OpenMP thread. */
    return omp_get_max_threads();
}
int openmp_yee_pec_y_get_xlen (){
    /* Preferred x-dimension extent for this backend. */
    return IDX_OPT_MAX;
}
/*
 * Drive the pec_y kernel over the full (x, y) index space.
 * The y range is split across OpenMP threads cyclically: thread t
 * processes rows t, t + numt, t + 2*numt, ...
 * Fixes: removed the unused ysingle/ymin/ymax block-partition locals
 * (dead code from a different scheduling scheme) and made the loop
 * counters long so they cannot overflow against the long bounds.
 * NOTE(review): xid is never passed to the kernel, so the inner loop
 * repeats the identical call scmc_internal_g_xlen times -- presumably
 * the generator always emits g_xlen == 1 here; confirm before changing.
 */
int openmp_yee_pec_y_exec (openmp_yee_pec_y_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ( long yid = tid ; yid < scmc_internal_g_ylen ; yid += numt )
{
for ( long xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1 )
{
openmp_yee_pec_y_scmc_kernel ( kerstr->outEB , kerstr->inEB , kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz , kerstr->y_cpu_core[0] , kerstr->numvec[0] , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] , kerstr->num_ele[0] , kerstr->damp_vars[0] , kerstr->deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/*
 * Parameter-binding setters for the pec_y kernel: each one points a
 * field of the kernel struct at the buffer held by an openmp_pscmc_mem
 * handle.  Fix: the generated bodies had no return statement; reading
 * the result of such a call is undefined behavior (C11 6.9.1p12), so
 * every setter now explicitly returns 0 (success).
 */
int openmp_yee_pec_y_scmc_set_parameter_outEB (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_inEB (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_cur_rankx (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_cur_ranky (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_cur_rankz (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_y_cpu_core (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_numvec (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_XLEN (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_YLEN (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_ZLEN (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_ovlp (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_xblock (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_yblock (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_zblock (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_num_ele (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_damp_vars (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_pec_y_scmc_set_parameter_deltat (openmp_yee_pec_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->deltat = pm->d_data; return 0; }
int openmp_yee_abc_y_init (openmp_pscmc_env * pe ,openmp_yee_abc_y_struct * kerstr ){
    /* No per-kernel initialization is required for this backend. */
    return 0;
}
void openmp_yee_abc_y_get_struct_len (size_t * len ){
    /* Report the size of this kernel's parameter struct. */
    *len = sizeof(openmp_yee_abc_y_struct);
}
int openmp_yee_abc_y_get_num_compute_units (openmp_yee_abc_y_struct * kerstr ){
    /* One compute unit per available OpenMP thread. */
    return omp_get_max_threads();
}
int openmp_yee_abc_y_get_xlen (){
    /* Preferred x-dimension extent for this backend. */
    return IDX_OPT_MAX;
}
/*
 * Drive the abc_y kernel over the full (x, y) index space.
 * The y range is split across OpenMP threads cyclically: thread t
 * processes rows t, t + numt, t + 2*numt, ...
 * Fixes: removed the unused ysingle/ymin/ymax block-partition locals
 * (dead code from a different scheduling scheme) and made the loop
 * counters long so they cannot overflow against the long bounds.
 * NOTE(review): xid is never passed to the kernel, so the inner loop
 * repeats the identical call scmc_internal_g_xlen times -- presumably
 * the generator always emits g_xlen == 1 here; confirm before changing.
 */
int openmp_yee_abc_y_exec (openmp_yee_abc_y_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ( long yid = tid ; yid < scmc_internal_g_ylen ; yid += numt )
{
for ( long xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1 )
{
openmp_yee_abc_y_scmc_kernel ( kerstr->outEB , kerstr->inEB , kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz , kerstr->y_cpu_core[0] , kerstr->numvec[0] , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] , kerstr->num_ele[0] , kerstr->damp_vars[0] , kerstr->deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/*
 * Parameter-binding setters for the abc_y kernel: each one points a
 * field of the kernel struct at the buffer held by an openmp_pscmc_mem
 * handle.  Fix: the generated bodies had no return statement; reading
 * the result of such a call is undefined behavior (C11 6.9.1p12), so
 * every setter now explicitly returns 0 (success).
 */
int openmp_yee_abc_y_scmc_set_parameter_outEB (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_inEB (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_cur_rankx (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_cur_ranky (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_cur_rankz (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_y_cpu_core (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_numvec (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_XLEN (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_YLEN (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_ZLEN (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_ovlp (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_xblock (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_yblock (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_zblock (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_num_ele (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_damp_vars (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_abc_y_scmc_set_parameter_deltat (openmp_yee_abc_y_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->deltat = pm->d_data; return 0; }
int openmp_yee_setfix_x_init (openmp_pscmc_env * pe ,openmp_yee_setfix_x_struct * kerstr ){
    /* No per-kernel initialization is required for this backend. */
    return 0;
}
void openmp_yee_setfix_x_get_struct_len (size_t * len ){
    /* Report the size of this kernel's parameter struct. */
    *len = sizeof(openmp_yee_setfix_x_struct);
}
int openmp_yee_setfix_x_get_num_compute_units (openmp_yee_setfix_x_struct * kerstr ){
    /* One compute unit per available OpenMP thread. */
    return omp_get_max_threads();
}
int openmp_yee_setfix_x_get_xlen (){
    /* Preferred x-dimension extent for this backend. */
    return IDX_OPT_MAX;
}
/*
 * Drive the setfix_x kernel over the full (x, y) index space.
 * The y range is split across OpenMP threads cyclically: thread t
 * processes rows t, t + numt, t + 2*numt, ...
 * Fixes: removed the unused ysingle/ymin/ymax block-partition locals
 * (dead code from a different scheduling scheme) and made the loop
 * counters long so they cannot overflow against the long bounds.
 * NOTE(review): xid is never passed to the kernel, so the inner loop
 * repeats the identical call scmc_internal_g_xlen times -- presumably
 * the generator always emits g_xlen == 1 here; confirm before changing.
 */
int openmp_yee_setfix_x_exec (openmp_yee_setfix_x_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ( long yid = tid ; yid < scmc_internal_g_ylen ; yid += numt )
{
for ( long xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1 )
{
openmp_yee_setfix_x_scmc_kernel ( kerstr->outEB , kerstr->inEB , kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz , kerstr->y_cpu_core[0] , kerstr->numvec[0] , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] , kerstr->num_ele[0] , kerstr->damp_vars[0] , kerstr->deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/*
 * Parameter-binding setters for the setfix_x kernel: each one points a
 * field of the kernel struct at the buffer held by an openmp_pscmc_mem
 * handle.  Fix: the generated bodies had no return statement; reading
 * the result of such a call is undefined behavior (C11 6.9.1p12), so
 * every setter now explicitly returns 0 (success).
 */
int openmp_yee_setfix_x_scmc_set_parameter_outEB (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_inEB (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_cur_rankx (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_cur_ranky (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_cur_rankz (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_y_cpu_core (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_numvec (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_XLEN (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_YLEN (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_ZLEN (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_ovlp (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_xblock (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_yblock (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_zblock (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_num_ele (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_damp_vars (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_setfix_x_scmc_set_parameter_deltat (openmp_yee_setfix_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->deltat = pm->d_data; return 0; }
int openmp_yee_damp_x_init (openmp_pscmc_env * pe ,openmp_yee_damp_x_struct * kerstr ){
    /* No per-kernel initialization is required for this backend. */
    return 0;
}
void openmp_yee_damp_x_get_struct_len (size_t * len ){
    /* Report the size of this kernel's parameter struct. */
    *len = sizeof(openmp_yee_damp_x_struct);
}
int openmp_yee_damp_x_get_num_compute_units (openmp_yee_damp_x_struct * kerstr ){
    /* One compute unit per available OpenMP thread. */
    return omp_get_max_threads();
}
int openmp_yee_damp_x_get_xlen (){
    /* Preferred x-dimension extent for this backend. */
    return IDX_OPT_MAX;
}
/*
 * Drive the damp_x kernel over the full (x, y) index space.
 * The y range is split across OpenMP threads cyclically: thread t
 * processes rows t, t + numt, t + 2*numt, ...
 * Fixes: removed the unused ysingle/ymin/ymax block-partition locals
 * (dead code from a different scheduling scheme) and made the loop
 * counters long so they cannot overflow against the long bounds.
 * NOTE(review): xid is never passed to the kernel, so the inner loop
 * repeats the identical call scmc_internal_g_xlen times -- presumably
 * the generator always emits g_xlen == 1 here; confirm before changing.
 */
int openmp_yee_damp_x_exec (openmp_yee_damp_x_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ( long yid = tid ; yid < scmc_internal_g_ylen ; yid += numt )
{
for ( long xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1 )
{
openmp_yee_damp_x_scmc_kernel ( kerstr->outEB , kerstr->inEB , kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz , kerstr->y_cpu_core[0] , kerstr->numvec[0] , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] , kerstr->num_ele[0] , kerstr->damp_vars[0] , kerstr->deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/*
 * Parameter-binding setters for the damp_x kernel: each one points a
 * field of the kernel struct at the buffer held by an openmp_pscmc_mem
 * handle.  Fix: the generated bodies had no return statement; reading
 * the result of such a call is undefined behavior (C11 6.9.1p12), so
 * every setter now explicitly returns 0 (success).
 */
int openmp_yee_damp_x_scmc_set_parameter_outEB (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_inEB (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_cur_rankx (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_cur_ranky (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_cur_rankz (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_y_cpu_core (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_numvec (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_XLEN (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_YLEN (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_ZLEN (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_ovlp (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_xblock (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_yblock (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_zblock (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_num_ele (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_damp_vars (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_damp_x_scmc_set_parameter_deltat (openmp_yee_damp_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->deltat = pm->d_data; return 0; }
int openmp_yee_pec_x_init (openmp_pscmc_env * pe ,openmp_yee_pec_x_struct * kerstr ){
    /* No per-kernel initialization is required for this backend. */
    return 0;
}
void openmp_yee_pec_x_get_struct_len (size_t * len ){
    /* Report the size of this kernel's parameter struct. */
    *len = sizeof(openmp_yee_pec_x_struct);
}
int openmp_yee_pec_x_get_num_compute_units (openmp_yee_pec_x_struct * kerstr ){
    /* One compute unit per available OpenMP thread. */
    return omp_get_max_threads();
}
int openmp_yee_pec_x_get_xlen (){
    /* Preferred x-dimension extent for this backend. */
    return IDX_OPT_MAX;
}
/*
 * Drive the pec_x kernel over the full (x, y) index space.
 * The y range is split across OpenMP threads cyclically: thread t
 * processes rows t, t + numt, t + 2*numt, ...
 * Fixes: removed the unused ysingle/ymin/ymax block-partition locals
 * (dead code from a different scheduling scheme) and made the loop
 * counters long so they cannot overflow against the long bounds.
 * NOTE(review): xid is never passed to the kernel, so the inner loop
 * repeats the identical call scmc_internal_g_xlen times -- presumably
 * the generator always emits g_xlen == 1 here; confirm before changing.
 */
int openmp_yee_pec_x_exec (openmp_yee_pec_x_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ( long yid = tid ; yid < scmc_internal_g_ylen ; yid += numt )
{
for ( long xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1 )
{
openmp_yee_pec_x_scmc_kernel ( kerstr->outEB , kerstr->inEB , kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz , kerstr->y_cpu_core[0] , kerstr->numvec[0] , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] , kerstr->num_ele[0] , kerstr->damp_vars[0] , kerstr->deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/*
 * Parameter-binding setters for the pec_x kernel: each one points a
 * field of the kernel struct at the buffer held by an openmp_pscmc_mem
 * handle.  Fix: the generated bodies had no return statement; reading
 * the result of such a call is undefined behavior (C11 6.9.1p12), so
 * every setter now explicitly returns 0 (success).
 */
int openmp_yee_pec_x_scmc_set_parameter_outEB (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_inEB (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_cur_rankx (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_cur_ranky (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_cur_rankz (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_y_cpu_core (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_numvec (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_XLEN (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_YLEN (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_ZLEN (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_ovlp (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_xblock (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_yblock (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_zblock (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_num_ele (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_damp_vars (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_pec_x_scmc_set_parameter_deltat (openmp_yee_pec_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->deltat = pm->d_data; return 0; }
int openmp_yee_abc_x_init (openmp_pscmc_env * pe ,openmp_yee_abc_x_struct * kerstr ){
    /* No per-kernel initialization is required for this backend. */
    return 0;
}
void openmp_yee_abc_x_get_struct_len (size_t * len ){
    /* Report the size of this kernel's parameter struct. */
    *len = sizeof(openmp_yee_abc_x_struct);
}
int openmp_yee_abc_x_get_num_compute_units (openmp_yee_abc_x_struct * kerstr ){
    /* One compute unit per available OpenMP thread. */
    return omp_get_max_threads();
}
int openmp_yee_abc_x_get_xlen (){
    /* Preferred x-dimension extent for this backend. */
    return IDX_OPT_MAX;
}
/*
 * Drive the abc_x kernel over the full (x, y) index space.
 * The y range is split across OpenMP threads cyclically: thread t
 * processes rows t, t + numt, t + 2*numt, ...
 * Fixes: removed the unused ysingle/ymin/ymax block-partition locals
 * (dead code from a different scheduling scheme) and made the loop
 * counters long so they cannot overflow against the long bounds.
 * NOTE(review): xid is never passed to the kernel, so the inner loop
 * repeats the identical call scmc_internal_g_xlen times -- presumably
 * the generator always emits g_xlen == 1 here; confirm before changing.
 */
int openmp_yee_abc_x_exec (openmp_yee_abc_x_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ( long yid = tid ; yid < scmc_internal_g_ylen ; yid += numt )
{
for ( long xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1 )
{
openmp_yee_abc_x_scmc_kernel ( kerstr->outEB , kerstr->inEB , kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz , kerstr->y_cpu_core[0] , kerstr->numvec[0] , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] , kerstr->num_ele[0] , kerstr->damp_vars[0] , kerstr->deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/*
 * Parameter-binding setters for the abc_x kernel: each one points a
 * field of the kernel struct at the buffer held by an openmp_pscmc_mem
 * handle.  Fix: the generated bodies had no return statement; reading
 * the result of such a call is undefined behavior (C11 6.9.1p12), so
 * every setter now explicitly returns 0 (success).
 */
int openmp_yee_abc_x_scmc_set_parameter_outEB (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_inEB (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_cur_rankx (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_cur_ranky (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_cur_rankz (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_y_cpu_core (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_numvec (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_XLEN (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_YLEN (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_ZLEN (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_ovlp (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_xblock (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_yblock (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_zblock (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_num_ele (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_damp_vars (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_abc_x_scmc_set_parameter_deltat (openmp_yee_abc_x_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->deltat = pm->d_data; return 0; }
int openmp_yee_setfix__init (openmp_pscmc_env * pe ,openmp_yee_setfix__struct * kerstr ){
    /* No per-kernel initialization is required for this backend. */
    return 0;
}
void openmp_yee_setfix__get_struct_len (size_t * len ){
    /* Report the size of this kernel's parameter struct. */
    *len = sizeof(openmp_yee_setfix__struct);
}
int openmp_yee_setfix__get_num_compute_units (openmp_yee_setfix__struct * kerstr ){
    /* One compute unit per available OpenMP thread. */
    return omp_get_max_threads();
}
int openmp_yee_setfix__get_xlen (){
    /* Preferred x-dimension extent for this backend. */
    return IDX_OPT_MAX;
}
/*
 * Drive the setfix kernel over the full (x, y) index space.
 * The y range is split across OpenMP threads cyclically: thread t
 * processes rows t, t + numt, t + 2*numt, ...
 * Fixes: removed the unused ysingle/ymin/ymax block-partition locals
 * (dead code from a different scheduling scheme) and made the loop
 * counters long so they cannot overflow against the long bounds.
 * NOTE(review): xid is never passed to the kernel, so the inner loop
 * repeats the identical call scmc_internal_g_xlen times -- presumably
 * the generator always emits g_xlen == 1 here; confirm before changing.
 */
int openmp_yee_setfix__exec (openmp_yee_setfix__struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ( long yid = tid ; yid < scmc_internal_g_ylen ; yid += numt )
{
for ( long xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1 )
{
openmp_yee_setfix__scmc_kernel ( kerstr->outEB , kerstr->inEB , kerstr->cur_rankx , kerstr->cur_ranky , kerstr->cur_rankz , kerstr->y_cpu_core[0] , kerstr->numvec[0] , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] , kerstr->num_ele[0] , kerstr->damp_vars[0] , kerstr->deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/*
 * Parameter-binding setters for the setfix kernel: each one points a
 * field of the kernel struct at the buffer held by an openmp_pscmc_mem
 * handle.  Fix: the generated bodies had no return statement; reading
 * the result of such a call is undefined behavior (C11 6.9.1p12), so
 * every setter now explicitly returns 0 (success).
 */
int openmp_yee_setfix__scmc_set_parameter_outEB (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->outEB = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_inEB (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inEB = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_cur_rankx (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankx = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_cur_ranky (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_ranky = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_cur_rankz (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cur_rankz = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_y_cpu_core (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->y_cpu_core = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_numvec (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_XLEN (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_YLEN (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_ZLEN (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_ovlp (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_xblock (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xblock = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_yblock (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yblock = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_zblock (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zblock = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_num_ele (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_damp_vars (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->damp_vars = pm->d_data; return 0; }
int openmp_yee_setfix__scmc_set_parameter_deltat (openmp_yee_setfix__struct *kerstr, openmp_pscmc_mem *pm) { kerstr->deltat = pm->d_data; return 0; }
/* Boilerplate entry points for the yee_damp kernel (generated API). */

/* One-time initialisation hook; the OpenMP backend needs no setup. */
int openmp_yee_damp__init (openmp_pscmc_env * pe ,openmp_yee_damp__struct * kerstr ){
    (void)pe;
    (void)kerstr;
    return 0;
}
/* Report the size of the kernel argument struct to the runtime. */
void openmp_yee_damp__get_struct_len (size_t * len ){
    *len = sizeof(openmp_yee_damp__struct);
}
/* One compute unit per available OpenMP thread. */
int openmp_yee_damp__get_num_compute_units (openmp_yee_damp__struct * kerstr ){
    (void)kerstr;
    return omp_get_max_threads();
}
/* Preferred x-dimension extent for this backend. */
int openmp_yee_damp__get_xlen (){
    return IDX_OPT_MAX;
}
/* Run the yee_damp kernel over a scmc_internal_g_xlen x scmc_internal_g_ylen
 * iteration space.  Rows (y) are distributed round-robin across the OpenMP
 * threads; each thread invokes the kernel scmc_internal_g_xlen times per row
 * (the kernel itself receives only yid).  Always returns 0.
 * The originals computed ysingle/ymin/ymax but never used them; that dead
 * code is removed here. */
int openmp_yee_damp__exec (openmp_yee_damp__struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads();
int tid = omp_get_thread_num();
int xid, yid;
/* Cyclic (round-robin) distribution of rows over threads. */
for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
openmp_yee_damp__scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->cur_rankx , ( kerstr )->cur_ranky , ( kerstr )->cur_rankz , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->damp_vars)[0] , ( ( kerstr )->deltat)[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/* Generated parameter binders for the yee_damp kernel: store pm->d_data in
 * the named slot of the argument struct.  Each now returns 0; the originals
 * fell off the end of a non-void function (UB if the result is used). */
int openmp_yee_damp__scmc_set_parameter_outEB (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->outEB = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_inEB (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inEB = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_cur_rankx (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_rankx = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_cur_ranky (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_ranky = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_cur_rankz (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_rankz = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_y_cpu_core (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->y_cpu_core = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_numvec (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_XLEN (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_YLEN (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_ZLEN (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_ovlp (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_xblock (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xblock = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_yblock (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->yblock = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_zblock (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->zblock = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_num_ele (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_damp_vars (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->damp_vars = pm->d_data);
return 0 ;
}
int openmp_yee_damp__scmc_set_parameter_deltat (openmp_yee_damp__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->deltat = pm->d_data);
return 0 ;
}
/* Boilerplate entry points for the yee_pec kernel (generated API). */

/* One-time initialisation hook; the OpenMP backend needs no setup. */
int openmp_yee_pec__init (openmp_pscmc_env * pe ,openmp_yee_pec__struct * kerstr ){
    (void)pe;
    (void)kerstr;
    return 0;
}
/* Report the size of the kernel argument struct to the runtime. */
void openmp_yee_pec__get_struct_len (size_t * len ){
    *len = sizeof(openmp_yee_pec__struct);
}
/* One compute unit per available OpenMP thread. */
int openmp_yee_pec__get_num_compute_units (openmp_yee_pec__struct * kerstr ){
    (void)kerstr;
    return omp_get_max_threads();
}
/* Preferred x-dimension extent for this backend. */
int openmp_yee_pec__get_xlen (){
    return IDX_OPT_MAX;
}
/* Run the yee_pec kernel over a scmc_internal_g_xlen x scmc_internal_g_ylen
 * iteration space.  Rows (y) are distributed round-robin across the OpenMP
 * threads; each thread invokes the kernel scmc_internal_g_xlen times per row
 * (the kernel itself receives only yid).  Always returns 0.
 * Dead locals ysingle/ymin/ymax from the generator are removed. */
int openmp_yee_pec__exec (openmp_yee_pec__struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads();
int tid = omp_get_thread_num();
int xid, yid;
/* Cyclic (round-robin) distribution of rows over threads. */
for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
openmp_yee_pec__scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->cur_rankx , ( kerstr )->cur_ranky , ( kerstr )->cur_rankz , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->damp_vars)[0] , ( ( kerstr )->deltat)[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/* Generated parameter binders for the yee_pec kernel: store pm->d_data in
 * the named slot of the argument struct.  Each now returns 0; the originals
 * fell off the end of a non-void function (UB if the result is used). */
int openmp_yee_pec__scmc_set_parameter_outEB (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->outEB = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_inEB (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inEB = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_cur_rankx (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_rankx = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_cur_ranky (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_ranky = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_cur_rankz (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_rankz = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_y_cpu_core (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->y_cpu_core = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_numvec (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_XLEN (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_YLEN (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_ZLEN (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_ovlp (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_xblock (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xblock = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_yblock (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->yblock = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_zblock (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->zblock = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_num_ele (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_damp_vars (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->damp_vars = pm->d_data);
return 0 ;
}
int openmp_yee_pec__scmc_set_parameter_deltat (openmp_yee_pec__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->deltat = pm->d_data);
return 0 ;
}
/* Boilerplate entry points for the yee_abc kernel (generated API). */

/* One-time initialisation hook; the OpenMP backend needs no setup. */
int openmp_yee_abc__init (openmp_pscmc_env * pe ,openmp_yee_abc__struct * kerstr ){
    (void)pe;
    (void)kerstr;
    return 0;
}
/* Report the size of the kernel argument struct to the runtime. */
void openmp_yee_abc__get_struct_len (size_t * len ){
    *len = sizeof(openmp_yee_abc__struct);
}
/* One compute unit per available OpenMP thread. */
int openmp_yee_abc__get_num_compute_units (openmp_yee_abc__struct * kerstr ){
    (void)kerstr;
    return omp_get_max_threads();
}
/* Preferred x-dimension extent for this backend. */
int openmp_yee_abc__get_xlen (){
    return IDX_OPT_MAX;
}
/* Run the yee_abc kernel over a scmc_internal_g_xlen x scmc_internal_g_ylen
 * iteration space.  Rows (y) are distributed round-robin across the OpenMP
 * threads; each thread invokes the kernel scmc_internal_g_xlen times per row
 * (the kernel itself receives only yid).  Always returns 0.
 * Dead locals ysingle/ymin/ymax from the generator are removed. */
int openmp_yee_abc__exec (openmp_yee_abc__struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads();
int tid = omp_get_thread_num();
int xid, yid;
/* Cyclic (round-robin) distribution of rows over threads. */
for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
openmp_yee_abc__scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->cur_rankx , ( kerstr )->cur_ranky , ( kerstr )->cur_rankz , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->damp_vars)[0] , ( ( kerstr )->deltat)[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/* Generated parameter binders for the yee_abc kernel: store pm->d_data in
 * the named slot of the argument struct.  Each now returns 0; the originals
 * fell off the end of a non-void function (UB if the result is used). */
int openmp_yee_abc__scmc_set_parameter_outEB (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->outEB = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_inEB (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inEB = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_cur_rankx (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_rankx = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_cur_ranky (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_ranky = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_cur_rankz (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cur_rankz = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_y_cpu_core (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->y_cpu_core = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_numvec (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_XLEN (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_YLEN (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_ZLEN (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_ovlp (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_xblock (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xblock = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_yblock (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->yblock = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_zblock (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->zblock = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_num_ele (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_damp_vars (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->damp_vars = pm->d_data);
return 0 ;
}
int openmp_yee_abc__scmc_set_parameter_deltat (openmp_yee_abc__struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->deltat = pm->d_data);
return 0 ;
}
|
align_stats.c | #include <getopt.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include <pthread.h>
#include "gem_tools.h"
#include "align_stats.h"
// Lookup tables.  Designated initializers cover only the listed entries;
// every other entry is zero-initialized by default.
// base_tab maps a base character to a small code: 0 = invalid, 1 = N,
// 2..5 = A,C,G,T (case-insensitive).
static const char base_tab[256]= {
['A']=2, ['C']=3, ['G']=4, ['T']=5, ['N']=1,
['a']=2, ['c']=3, ['g']=4, ['t']=5, ['n']=1};
// id_brk_char classifies the delimiter characters of a Casava-style read ID
// (':', '#', ' ', '/', and the terminating NUL); 0 means "not a delimiter".
static const char id_brk_char[256]= {
[':']=ID_COLON_CHAR, ['#']=ID_HASH_CHAR, [' ']=ID_SPACE_CHAR, ['/']=ID_SLASH_CHAR, [0]=ID_END_CHAR
};
/* Print the command-line usage summary to stream f.  Options that were
 * compiled out (OpenMP thread count, gzip/bzip2 compression) are listed only
 * when the corresponding feature macro is defined.
 * NOTE(review): several option descriptions open a '(' that is never closed
 * (e.g. -z, -j, -h) -- cosmetic, deliberately left untouched here. */
static void usage(FILE *f)
{
fputs("usage:\n align_stats\n",f);
fputs(" -r|--reads <reads file or file pair>\n",f);
fputs(" -o|--output <output stats file>\n",f);
fputs(" -d|--insert_dist <output insert size distribution file>\n",f);
fputs(" -M|--min_insert <minimum insert size> (for pairing of single end files: default=0)\n",f);
fprintf(f," -m|--max_insert <maximum insert size> (for pairing of single end files: default=%d)\n",DEFAULT_MAX_INSERT);
#ifdef HAVE_OPENMP
fputs(" -t|--threads <number of threads>\n",f);
#endif
#ifdef HAVE_ZLIB
fputs(" -z|--gzip (compress output files with gzip\n",f);
fputs(" -Z|--no=compress (default)\n",f);
#endif
#ifdef HAVE_BZLIB
fputs(" -j|--bzip2 (compress output files with bzip2\n",f);
#endif
fputs(" -l|--read_length <untrimmed read length>\n",f);
fputs(" -V|--variable Variable length reads\n",f);
fputs(" -p|--paired Paired mapping input file\n",f);
fputs(" -i|--ignore_id Do not attempt to parse read IDs\n",f);
fputs(" -w|--mmap mmap input files\n",f);
fprintf(f," -P|--phage_lambda <identifier for phage lambda> (default='%s')\n",PHAGE_LAMBDA);
fprintf(f," -X|--phix174 <identifier for phiX174> (default='%s')\n",PHIX174);
fprintf(f," -L|--max_read_length <maximum valid read length> (default=%u)\n",MAX_READ_LENGTH);
fprintf(f," -F|--fastq select fastq quality coding %s\n",DEFAULT_QUAL_OFFSET==QUAL_FASTQ?"(default)":"");
fprintf(f," -S|--solexa select ilumina quality coding %s\n",DEFAULT_QUAL_OFFSET==QUAL_SOLEXA?"(default)":"");
fprintf(f," -q|--qual_off select quality value offset (default=%d)\n",DEFAULT_QUAL_OFFSET);
fputs(" -h|help|usage (print this file\n\n",f);
}
static void set_opt(char *opt,char **opt_p,char *val)
{
if(*opt_p) {
fprintf(stderr,"multiple %s options: '%s' overwriting previous definition '%s'\n",opt,val,*opt_p);
free(*opt_p);
}
*opt_p=strdup(val);
}
/* Thin allocation wrappers: abort via gt_cond_fatal_error on out-of-memory,
 * so callers never need to check for NULL. */
static void *as_malloc(size_t s)
{
void *p = malloc(s);
gt_cond_fatal_error(!p,MEM_HANDLER);
return p;
}
/* Zero-initializing variant of as_malloc (n elements of size s). */
static void *as_calloc(size_t n,size_t s)
{
void *p = calloc(n,s);
gt_cond_fatal_error(!p,MEM_HANDLER);
return p;
}
/* Resizing variant; ptr follows the usual realloc contract. */
static void *as_realloc(void *ptr,size_t s)
{
void *p = realloc(ptr,s);
gt_cond_fatal_error(!p,MEM_HANDLER);
return p;
}
/* Derive default names for the report and insert-size-distribution files
 * from the input file name(s), appending the compression suffix that matches
 * param->compress.  A final name of "-" means stdout and is normalised to
 * NULL.  Fixes vs. the original: the p[-k]/p-root accesses are now guarded
 * (the original dereferenced p even when strchr found no '.', which is UB),
 * the strdup'd buffer is freed, and the garbled '&param' references are
 * restored. */
static void as_set_output_files(as_param *param)
{
char *csuff[3]={"",".gz",".bz2"};
char *cs;
if(!(param->output_file && param->dist_file)) {
switch(param->compress) {
case GZIP:
cs=csuff[1];
break;
case BZIP2:
cs=csuff[2];
break;
default:
cs=csuff[0];
break;
}
if(param->input_files[0]) {
char *base=strdup(param->input_files[0]); /* keep original pointer so it can be freed */
char *root=base;
char *p=strrchr(root,'/');
if(p) root=p+1; /* strip leading directories */
p=strchr(root,'.');
if(p) *p=0; /* strip extension */
if(p) { /* suffix trimming is only meaningful when a '.' was found */
if(param->input_files[1]) {
if(p-root>2 && (p[-1]=='1' || p[-1]=='2')) {
if(p[-2]=='.' || p[-2]=='_') p[-2]=0;
else p[-1]=0;
}
else if(p-root>6 && !strncmp(p-4,"_map",4) && (p[-5]=='1' || p[-5]=='2') && p[-6]=='_') p[-6]=0;
} else {
if(p-root>4 && !strncmp(p-4,"_map",4)) p[-4]=0;
}
}
/* asprintf allocates the final names (GNU extension); on failure the
 * pointer is left unset, matching the original behaviour. */
if(!param->output_file) asprintf(&param->output_file,"%s_report.txt%s",root,cs);
if(!param->dist_file) asprintf(&param->dist_file,"%s_frag_dist.txt%s",root,cs);
free(base);
} else {
if(!param->output_file) asprintf(&param->output_file,"align_stats_report.txt%s",cs);
if(!param->dist_file) asprintf(&param->dist_file,"align_stats_frag_dist.txt%s",cs);
}
}
if(!strcmp(param->output_file,"-")) param->output_file=0;
if(!strcmp(param->dist_file,"-")) param->dist_file=0;
}
/* Allocate an id_tag whose gt_string fields hold the parsed components of a
 * Casava-style read identifier. */
static id_tag *new_id_tag(void)
{
id_tag *idt = as_malloc(sizeof *idt);
idt->instrument_name = gt_string_new(128);
idt->run = gt_string_new(128);
idt->flowcell = gt_string_new(128);
idt->index = gt_string_new(128);
return idt;
}
/* Reset all string fields so the tag can be reused for the next read. */
static void clear_id_tag(id_tag *idt)
{
gt_string_clear(idt->instrument_name);
gt_string_clear(idt->run);
gt_string_clear(idt->flowcell);
gt_string_clear(idt->index);
}
/* Release the string fields and then the tag itself. */
static void free_id_tag(id_tag *idt)
{
gt_string_delete(idt->instrument_name);
gt_string_delete(idt->run);
gt_string_delete(idt->flowcell);
gt_string_delete(idt->index);
free(idt);
}
/* Parse a Casava-style read identifier into idt.
 * Returns ID_TAG_OK on success, or an ID_TAG_ERROR_* code when the
 * lane/tile/x/y fields are missing or out of range. */
static uint64_t parse_id_tag(gt_string *tag,id_tag *idt)
{
// @HWUSI-EAS100R:6:73:941:1973#0/1 Old style Casava tag
// Machine:lane:tile:x:y#multiplex tag/read
//
// @EAS139:136:FC706VJ:2:2104:15343:197393 1:Y:18:ATCACG New style (v1.8) tag
// Machine:run:flowcell:lane:tile:x:y read:filter:flags:multiplex tag
//
// The end of the tag (/read for old style or the part after the space for the new style) may have been trimmed
//
char type[11],*tg,*p;
int fields[11],len[11];
int i=0,ix=0,err=ID_TAG_OK;
clear_id_tag(idt);
tg=gt_string_get_string(tag);
ix=0;
fields[0]=0;
register char c;
/* Split the tag into up to 11 fields; fields[i]/len[i] record the start
 * offset and length of field i, type[i] the delimiter that ended it. */
while(tg[ix] && i<11) {
while(!(c=id_brk_char[(int)tg[ix]])) ix++;
len[i]=ix+1-fields[i];
type[i++]=c;
ix++;
if(c==ID_END_CHAR || i==11) break;
if(c==ID_SPACE_CHAR) while((c=id_brk_char[(int)tg[ix]])==ID_SPACE_CHAR) ix++; // Skip multiple white space characters
fields[i]=ix;
}
/* j = number of leading colon-terminated fields; 4 colons before a '#'
 * identifies the old-style tag layout. */
int j;
for(j=0;j<i;j++) if(type[j]!=ID_COLON_CHAR) break;
int sidx;
if((i==6 || i==7) && j==4 && type[j]==ID_HASH_CHAR) {
/* Old style: Machine:lane:tile:x:y#... -> numeric fields start at 1. */
gt_string_copy_substr(idt->instrument_name,tag,fields[0],len[0]);
sidx=1;
} else {
/* New style: Machine:run:flowcell:lane:tile:x:y -> numeric fields at 3. */
gt_string_copy_substr(idt->instrument_name,tag,fields[0],len[0]);
gt_string_copy_substr(idt->run,tag,fields[1],len[1]);
gt_string_copy_substr(idt->flowcell,tag,fields[2],len[2]);
sidx=3;
}
idt->lane=(uint32_t)strtoul(tg+fields[sidx],&p,10);
if(idt->lane<1 || idt->lane>MAX_LANE_ID) {
err=ID_TAG_ERROR_BAD_LANE;
}
if(err==ID_TAG_OK) {
idt->tile=(uint32_t)strtoul(tg+fields[sidx+1],&p,10);
if(idt->tile<1) {
err=ID_TAG_ERROR_BAD_TILE;
}
}
if(err==ID_TAG_OK) {
idt->x=(uint32_t)strtoul(tg+fields[sidx+2],&p,10);
if(idt->x<1) {
err=ID_TAG_ERROR_BAD_COORD;
}
}
if(err==ID_TAG_OK) {
idt->y=(uint32_t)strtoul(tg+fields[sidx+3],&p,10);
if(idt->y<1) {
err=ID_TAG_ERROR_BAD_COORD;
}
}
// if(err==ID_TAG_OK) {
// printf("Machine: " PRIgts "\tRun: " PRIgts "\tFC: " PRIgts "\tLane: %d\tTile: %" PRIu32 "\tX,Y: %" PRIu32 ",%" PRIu32 "\n",PRIgts_content(idt->instrument_name),PRIgts_content(idt->run),PRIgts_content(idt->flowcell),idt->lane,idt->tile,idt->x,idt->y);
// }
return err;
}
/* Allocate a zeroed statistics block.  Only the indel-length histograms are
 * allocated up front (per read end, insertions and deletions separately);
 * every other array grows lazily via as_stats_resize(). */
static as_stats* as_stats_new(bool paired)
{
as_stats* stats = as_calloc((size_t)1,sizeof(as_stats));
stats->max_indel_length = 50; // Grown on demand by add_indel_stats()
stats->paired = paired;
int ends = (paired==true ? 2 : 1);
int rd;
for(rd=0; rd<ends; rd++) {
int t;
for(t=0; t<2; t++) stats->indel_length[rd*2+t] = as_calloc(sizeof(uint64_t),stats->max_indel_length+1);
}
stats->insert_size = 0;
stats->loc_hash = 0;
return stats;
}
/* Release every array owned by a stats block, then the block itself.
 * curr_read_store[i] != 0 marks that the per-read-end arrays for end i were
 * ever allocated (see as_stats_resize). */
static void as_stats_free(as_stats *stats)
{
uint64_t i,j=(stats->paired==true?2:1);
for(i=0;i<j;i++) {
if(stats->curr_read_store[i]) {
free(stats->read_length_stats[i]);
uint64_t k;
for(k=0;k<2;k++) {
free(stats->indel_length[i*2+k]);
free(stats->indel_stats[i*2+k]);
}
/* One base-count array per cycle, one mm/qual array per quality value. */
for(k=0;k<stats->curr_read_store[i];k++) free(stats->base_counts_by_cycle[i][k]);
for(k=i*(MAX_QUAL+1);k<(i+1)*(MAX_QUAL+1);k++) {
free(stats->mm_stats[k]);
free(stats->qual_stats[k]);
}
free(stats->base_counts_by_cycle[i]);
}
}
if(stats->paired==true) {
/* NOTE(review): HASH_CLEAR drops the hash table only; the dist_element
 * nodes themselves are not freed here -- verify intended. */
HASH_CLEAR(hh,stats->insert_size);
}
free(stats);
}
/* Record one indel event: bump the per-cycle counter for channel ix
 * (ix encodes read end and INS/DEL, see get_error_profile) and, when the
 * indel is not longer than MAX_INDEL_SIZE, the per-length histogram,
 * growing all length histograms first if this size is a new maximum. */
static void add_indel_stats(as_stats *stats,uint64_t size,uint64_t cycle,uint64_t ix)
{
stats->indel_stats[ix][cycle]++;
if(size<=MAX_INDEL_SIZE) {
if(size>stats->max_indel_length) {
uint64_t nsize=size*1.2; /* over-allocate 20% to limit future reallocs */
if(nsize>MAX_INDEL_SIZE) nsize=MAX_INDEL_SIZE;
uint64_t i,j=(stats->paired==true?2:1);
for(i=0;i<j;i++) {
int k;
for(k=0;k<2;k++) {
stats->indel_length[i*2+k]=as_realloc(stats->indel_length[i*2+k],sizeof(uint64_t)*(nsize+1));
uint64_t sz;
/* Zero only the newly added tail of each histogram. */
for(sz=stats->max_indel_length+1;sz<=nsize;sz++) stats->indel_length[i*2+k][sz]=0;
}
}
stats->max_indel_length=nsize;
}
stats->indel_length[ix][size]++;
}
}
/* Look up the distribution element for insert size x in the uthash table
 * *de, creating (and inserting) a zeroed element if none exists yet. */
static dist_element *as_find_insert_counter(dist_element **de,int64_t x)
{
dist_element *new_de;
HASH_FIND(hh,*de,&x,sizeof(int64_t),new_de);
if(!new_de) {
new_de=as_malloc(sizeof(dist_element));
int i;
for(i=0;i<4;i++) new_de->ct[i]=0;
new_de->x=x;
HASH_ADD(hh,*de,x,sizeof(int64_t),new_de);
}
return new_de;
}
/* Bump counter ix of the distribution element for insert size x, creating
 * the element on first use.  Returns the (possibly new) element. */
static dist_element *as_increase_insert_count(dist_element **de,int ix,int64_t x)
{
dist_element *elem = as_find_insert_counter(de,x);
elem->ct[ix]++;
return elem;
}
#define LH_BIN_SIZE 1024
static pthread_rwlock_t loc_hash_rwlock;
/* Record a mapping location (contig, position x, tile, insert size) in the
 * two-level location hash used later for duplicate detection.  Thread-safe:
 * the contig hash is guarded by loc_hash_rwlock, each per-contig block hash
 * by lh->rwlock, and each block's element array by lb->mutex.
 * Fixes vs. the original: (1) the new element is filled in BEFORE the mutex
 * is released -- the original unlocked first, so a concurrent realloc of
 * lb->elem could leave the pointer dangling; (2) the find-then-insert on
 * both hash levels re-checks under the write lock, so two threads racing on
 * the same contig/bin no longer insert duplicate nodes. */
static void insert_loc(as_stats *stats,uint64_t x,int64_t ins_size,uint32_t tile,gt_string *ctg)
{
loc_hash *lh;
unsigned int k=x/LH_BIN_SIZE;
uint16_t loc=x%LH_BIN_SIZE;
pthread_rwlock_rdlock(&loc_hash_rwlock);
HASH_FIND_STR(*stats->loc_hash,gt_string_get_string(ctg),lh);
pthread_rwlock_unlock(&loc_hash_rwlock);
if(!lh) {
loc_hash *nlh=as_malloc(sizeof(loc_hash));
nlh->ctg=strdup(gt_string_get_string(ctg));
nlh->lblock=0;
pthread_rwlock_init(&nlh->rwlock,NULL);
pthread_rwlock_wrlock(&loc_hash_rwlock);
HASH_FIND_STR(*stats->loc_hash,nlh->ctg,lh); /* re-check under write lock */
if(!lh) {
HASH_ADD_KEYPTR(hh,*stats->loc_hash,nlh->ctg,(int)strlen(nlh->ctg),nlh);
lh=nlh;
nlh=0;
}
pthread_rwlock_unlock(&loc_hash_rwlock);
if(nlh) { /* lost the race: discard our copy */
pthread_rwlock_destroy(&nlh->rwlock);
free(nlh->ctg);
free(nlh);
}
}
loc_block *lb;
pthread_rwlock_rdlock(&lh->rwlock);
HASH_FIND_INT(lh->lblock,&k,lb);
pthread_rwlock_unlock(&lh->rwlock);
if(!lb) {
loc_block *nlb=as_malloc(sizeof(loc_block));
nlb->n_elem=0;
nlb->size=INIT_LB_SIZE;
nlb->x=k;
nlb->elem=as_malloc(nlb->size*sizeof(loc_elem));
pthread_mutex_init(&nlb->mutex,NULL);
pthread_rwlock_wrlock(&lh->rwlock);
HASH_FIND_INT(lh->lblock,&k,lb); /* re-check under write lock */
if(!lb) {
HASH_ADD_INT(lh->lblock,x,nlb);
lb=nlb;
nlb=0;
}
pthread_rwlock_unlock(&lh->rwlock);
if(nlb) { /* lost the race: discard our copy */
pthread_mutex_destroy(&nlb->mutex);
free(nlb->elem);
free(nlb);
}
}
pthread_mutex_lock(&lb->mutex);
if(lb->n_elem==lb->size) {
lb->size*=1.5;
lb->elem=as_realloc(lb->elem,lb->size*sizeof(loc_elem));
}
loc_elem* le=lb->elem+(lb->n_elem++);
le->loc=loc;
le->tile=tile;
le->dist=ins_size;
pthread_mutex_unlock(&lb->mutex); /* unlock only after the element is written */
}
/* Grow all per-cycle arrays for read end rd so they can hold at least l+1
 * cycles.  Over-allocates by 50% to amortise future growth; newly added
 * slots are zero-initialised.  curr_read_store[rd]==0 means this is the
 * first allocation for that read end. */
static void as_stats_resize(as_stats *stats,uint64_t rd,uint64_t l)
{
stats->max_read_length[rd]=l;
if(l>=stats->curr_read_store[rd]) {
uint64_t i,nlen=l*1.5; // Allocate a bit more space than we need now to avoid un-necessary re-sizing in future
if(stats->curr_read_store[rd]) {
/* Already allocated: realloc every array, then zero the new tail. */
stats->read_length_stats[rd]=as_realloc(stats->read_length_stats[rd],nlen*sizeof(uint64_t));
stats->base_counts_by_cycle[rd]=as_realloc(stats->base_counts_by_cycle[rd],sizeof(void *)*nlen);
uint64_t j;
for(j=rd*2;j<rd*2+2;j++) stats->indel_stats[j]=as_realloc(stats->indel_stats[j],sizeof(uint64_t)*nlen);
for(j=rd*(MAX_QUAL+1);j<(rd+1)*(MAX_QUAL+1);j++) {
stats->mm_stats[j]=as_realloc(stats->mm_stats[j],sizeof(uint64_t)*nlen);
stats->qual_stats[j]=as_realloc(stats->qual_stats[j],sizeof(uint64_t)*nlen);
}
for(i=stats->curr_read_store[rd];i<nlen;i++) {
stats->read_length_stats[rd][i]=0;
/* 5 base codes per quality value for each cycle. */
stats->base_counts_by_cycle[rd][i]=as_calloc((size_t)(MAX_QUAL+1)*5,sizeof(uint64_t));
for(j=rd*2;j<rd*2+2;j++) stats->indel_stats[j][i]=0;
for(j=rd*(MAX_QUAL+1);j<(rd+1)*(MAX_QUAL+1);j++) {
stats->mm_stats[j][i]=0;
stats->qual_stats[j][i]=0;
}
}
} else {
/* First allocation for this read end: calloc everything. */
stats->read_length_stats[rd]=as_calloc((size_t)nlen,sizeof(uint64_t));
stats->base_counts_by_cycle[rd]=as_malloc(sizeof(void *)*nlen);
uint64_t j;
for(j=rd*2;j<rd*2+2;j++) stats->indel_stats[j]=as_calloc((size_t)nlen,sizeof(uint64_t));
for(j=rd*(MAX_QUAL+1);j<(rd+1)*(MAX_QUAL+1);j++) {
stats->mm_stats[j]=as_calloc((size_t)nlen,sizeof(uint64_t));
stats->qual_stats[j]=as_calloc((size_t)nlen,sizeof(uint64_t));
}
for(i=0;i<nlen;i++) {
stats->base_counts_by_cycle[rd][i]=as_calloc((size_t)(MAX_QUAL+1)*5,sizeof(uint64_t));
}
}
stats->curr_read_store[rd]=nlen;
}
}
/* Accumulate per-cycle quality and mismatch/indel statistics for read end rd
 * from the FIRST map of alignment al.  Mismatches are additionally
 * classified as transition (1) or transversion (2) via mis_type, and as
 * "same as previous base" (pbc) vs "different" (pbn). */
static void get_error_profile(as_stats *stats,gt_alignment *al,uint64_t rd,int qual_offset)
{
/* mis_type[(read_base<<2)|ref_base]: 0 = identical, 1 = transition,
 * 2 = transversion, for bases coded 0..3 = A,C,G,T. */
static int mis_type[]={0,2,1,2,2,0,2,1,1,2,0,2,2,1,2,0};
if(!al->maps->used) return;
// Get first map only from alignment
register gt_string* const read = al->read;
register gt_string* const quals = al->qualities;
register const bool has_qualities = gt_alignment_has_qualities(al);
gt_map *map=gt_alignment_get_map(al,0);
register int quality_misms = 0;
uint64_t i;
/* Per-cycle quality histogram (clamped to MAX_QUAL). */
for(i=0;i<read->length;i++) {
if(has_qualities) {
quality_misms = gt_string_get_string(quals)[i]-qual_offset;
if(quality_misms>MAX_QUAL) quality_misms=MAX_QUAL;
} else quality_misms=0;
stats->qual_stats[rd*(MAX_QUAL+1)+quality_misms][i]++;
}
GT_MAP_ITERATE(map,map_block) {
GT_MISMS_ITERATE(map_block,misms) {
if (has_qualities) {
quality_misms = gt_string_get_string(quals)[misms->position]-qual_offset;
if(quality_misms>MAX_QUAL) quality_misms=MAX_QUAL;
else if(quality_misms<0) quality_misms=0;
}
switch (misms->misms_type) {
case MISMS:
stats->mm_stats[rd*(MAX_QUAL+1)+quality_misms][misms->position]++;
int base=base_tab[(int)gt_string_get_string(read)[misms->position]]-2;
int rbase=base_tab[(int)misms->base]-2;
if(base>=0 && rbase>=0) {
// Get transition/transversion counts
int type=mis_type[(base<<2)|rbase];
if(type==1) stats->ts_stats[rd][quality_misms]++;
else if(type==2) stats->tv_stats[rd][quality_misms]++;
}
if(base>=0 && misms->position) {
/* Compare against the preceding read base for context stats. */
int prev_base=base_tab[(int)gt_string_get_string(read)[misms->position-1]]-2;
if(prev_base>=0) {
if(base==prev_base) stats->pbc_stats[rd][quality_misms]++;
else stats->pbn_stats[rd][quality_misms]++;
}
}
break;
case INS:
add_indel_stats(stats,misms->size,misms->position,rd);
break;
case DEL:
add_indel_stats(stats,misms->size,misms->position,2+rd);
break;
}
}
}
}
/* Collect all per-read and per-pair statistics for one template: yields and
 * base/quality counts per cycle, map/unique/ambiguous counts, split-map
 * counts, insert-size distributions and duplicate-detection locations.
 * The only code changes vs. the original are the restoration of the two
 * '&gt_err' arguments, which had been HTML-entity-garbled to '>_err' and
 * broke compilation; everything else is untouched. */
static void as_collect_stats(gt_template* template,as_stats* stats,as_param *param,id_tag *idt)
{
stats->nreads++;
uint64_t nrd;
bool paired_file=false; // Was the input file from a paired mapping
if(gt_input_generic_parser_attributes_is_paired(param->parser_attr)) {
if(gt_template_get_num_blocks(template)!=2) {
gt_fatal_error_msg("Fatal error: Expecting paired reads\n");
}
nrd=2;
if(!param->input_files[1]) paired_file=true;
} else {
if(gt_template_get_num_blocks(template)!=1) {
gt_fatal_error_msg("Fatal error: Expecting unpaired reads\n");
}
nrd=1;
}
uint64_t j;
gt_alignment *al[2];
char *rd[2],*ql[2];
uint64_t len[2];
register int qual_offset=param->qual_offset;
// Get alignments, qualities, reads and lengths for both ends
for(j=0;j<nrd;j++) {
al[j]=gt_template_get_block(template,j);
rd[j]=gt_alignment_get_read(al[j]);
ql[j]=gt_alignment_get_qualities(al[j]);
len[j]=strlen(rd[j]);
// Update yield max_read_length and resize stats arrays if necessary
if(stats->max_read_length[j]<len[j]) as_stats_resize(stats,j,len[j]);
stats->read_length_stats[j][len[j]]++;
uint64_t i,yld=0;
char *p=rd[j];
char *q=ql[j];
uint64_t **bc=stats->base_counts_by_cycle[j];
for(i=0;i<len[j];i++) {
int base=base_tab[(int)p[i]]-1; /* 0 = N, 1..4 = A,C,G,T; -1 = invalid */
int qual=q[i]-qual_offset;
if(qual<0 || qual>MAX_QUAL || base<0) {
gt_fatal_error_msg("Illegal base or quality character '%c %c' in read\n",p[i],q[i]);
}
if(base) yld++; /* yield counts non-N bases only */
bc[i][qual*5+base]++;
}
stats->yield[j]+=yld;
}
// Filter maps (both single and paired end) to remove maps after first zero strata after the first hit
uint64_t nmaps[3]={0,0,0};
uint64_t max_dist[3]={0,0,0};
bool ambig[2]={false,false};
// Single end alignments
for(j=0;j<nrd;j++) {
uint64_t i,k;
k=gt_alignment_get_num_counters(al[j]);
for(i=0;i<k;i++) {
uint64_t x=gt_alignment_get_counter(al[j],i);
if(x) {
nmaps[j]+=x;
max_dist[j]=i;
} else if(nmaps[j]) break;
}
if(i==k-1 && paired_file==false) ambig[j]=true;
// Collect error states from first alignment only
if(nmaps[j]) {
get_error_profile(stats,al[j],j,qual_offset);
}
}
if(nrd==2) {
// Paired end alignments
uint64_t i,k;
k=gt_template_get_num_counters(template);
for(i=0;i<k;i++) {
uint64_t x=gt_template_get_counter(template,i);
if(x) {
nmaps[2]+=x;
max_dist[2]=i;
} else if(nmaps[2]) break;
}
// Now count number of split maps for each end
uint64_t nsplit[3]={0,0,0};
for(j=0;j<2;j++) {
bool flg=false;
GT_ALIGNMENT_ITERATE(al[j],map) {
if(gt_map_get_distance(map)<=max_dist[j]) {
if(gt_map_get_num_blocks(map)>1) nsplit[j]++;
else flg=true;
}
}
if(nsplit[j]) {
stats->reads_with_splitmaps[j]++;
if(flg==false) stats->reads_only_with_splitmaps[j]++;
}
}
// And for paired alignments
bool flg=false;
GT_TEMPLATE_ITERATE_MMAP__ATTR_(template,maps,maps_attr) {
if(maps[0] && maps[1]) {
if(maps_attr->distance<=max_dist[2]) {
if(gt_map_get_num_blocks(maps[0])>1 || gt_map_get_num_blocks(maps[1])>1) nsplit[2]++;
else flg=true;
}
}
}
if(nsplit[2]) {
stats->reads_with_splitmaps[2]++;
if(flg==false) stats->reads_only_with_splitmaps[2]++;
}
for(j=0;j<2;j++) {
if(nmaps[j]) {
stats->mapped[j]++;
if(nmaps[j]==1) {
if(ambig[j]==false) stats->unique[j]++;
else stats->ambiguous[j]++;
}
}
}
if(nmaps[2]) {
stats->paired_mapped++;
if(nmaps[2]==1 && (nmaps[0]<=gt_alignment_get_num_maps(al[0])) && (nmaps[1]<=gt_alignment_get_num_maps(al[1]))) {
stats->paired_unique++;
/* NOTE(review): 'maps' here relies on the iterator macro above having
 * declared it at this scope -- confirm against the GT_TEMPLATE macro. */
maps=gt_template_get_mmap_array(template,0,NULL);
if(maps[0] && maps[1]) {
gt_status gt_err;
int64_t ins_size=gt_template_get_insert_size(maps,&gt_err,0,0);
if(gt_err==GT_TEMPLATE_INSERT_SIZE_OK) {
dist_element* de=as_increase_insert_count(&stats->insert_size,AS_INSERT_TYPE_PAIRED,ins_size);
if(nmaps[0]>1 || nmaps[1]>1) de->ct[AS_INSERT_TYPE_RECOVERED]++;
if(nsplit[2]) de->ct[AS_INSERT_TYPE_SPLIT]++;
}
}
}
}
// Track insert sizes for all pairs where single end reads are uniquely mapping
if(nmaps[0]==1 && nmaps[1]==1) {
gt_status gt_err;
gt_map *tmaps[2];
tmaps[0]=gt_alignment_get_map(al[0],0);
tmaps[1]=gt_alignment_get_map(al[1],0);
uint64_t xx;
gt_string *contig;
int64_t ins_size=gt_template_get_insert_size(tmaps,&gt_err,&xx,&contig);
if(gt_err==GT_TEMPLATE_INSERT_SIZE_OK) {
(void)as_increase_insert_count(&stats->insert_size,AS_INSERT_TYPE_ALL_UNIQUE,ins_size);
stats->paired_type[PAIR_TYPE_DS]++;
insert_loc(stats,xx,ins_size,idt->tile,contig);
} else if(gt_err==GT_TEMPLATE_INSERT_SIZE_SAME_STRAND) stats->paired_type[PAIR_TYPE_SS]++;
else if(gt_err==GT_TEMPLATE_INSERT_SIZE_DIFFERENT_CONTIGS) stats->paired_type[PAIR_TYPE_MM]++;
}
} else {
/* We can still track duplicates for single end reads, although we will find too many */
gt_map* tmap=gt_alignment_get_map(al[0],0);
insert_loc(stats,tmap->position,0,idt->tile,tmap->seq_name);
}
}
/* Sort comparator for dist_element, ordering by insert size x.
 * Uses a sign comparison instead of subtraction: x is int64_t, so the
 * original "return a->x - b->x" truncated the difference to int and could
 * report the wrong order (or overflow) for widely separated values. */
static int cmp_dist_elem(dist_element *a,dist_element *b)
{
return (a->x > b->x) - (a->x < b->x);
}
/* qsort comparator for loc_elem: order by position within the bin, then by
 * absolute insert size, then by tile, and finally by signed insert size. */
static int cmp_loc_elem(const void *s1,const void *s2)
{
const loc_elem *a = s1;
const loc_elem *b = s2;
int diff = a->loc - b->loc;
if(diff) return diff;
diff = abs(a->dist) - abs(b->dist);
if(diff) return diff;
diff = a->tile - b->tile;
if(diff) return diff;
return a->dist - b->dist;
}
/* Thread entry point: estimate the PCR/optical duplicate rate from the
 * location hash filled by insert_loc().  Elements sharing a bin are sorted
 * and scanned; runs with identical (loc, |dist|) are duplicate candidates,
 * further split by tile and strand to separate optical from library
 * duplicates.  Results are written into stats->duplicate_* fields.
 * Always returns 0 (pthread signature). */
static void *as_calc_duplicate_rate(void *ss)
{
as_param* param=ss;
as_stats* stats=param->stats[0];
loc_hash* lh=*(stats->loc_hash);
/* dup_cnt[0] = run-size histogram; dup_cnt[1..4] = pairwise co-occurrence
 * counts used below to apportion duplicate classes. */
uint64_t (*dup_cnt)[DUP_LIMIT+1]=stats->duplicate_counts;
uint64_t dcounts[2][DUP_LIMIT+1];
uint64_t tot=0;
int i,j;
for(i=0;i<5;i++) for(j=0;j<=DUP_LIMIT;j++) dup_cnt[i][j]=0;
for(;lh;lh=lh->hh.next) {
loc_block *lb;
for(lb=lh->lblock;lb;lb=lb->hh.next) {
/* Sort so duplicates of the same (loc,|dist|) run become adjacent. */
qsort(lb->elem,lb->n_elem,sizeof(loc_elem),cmp_loc_elem);
u_int16_t tile,loc;
int16_t dst=0;
tile=loc=0;
/* k = length of the current duplicate run; kk[] accumulates per-strand
 * counts for the current (tile,dist) sub-group; dcounts[][] holds the
 * closed sub-groups of the run (xx of them). */
int k,k1,xx;
k=k1=xx=0;
uint64_t kk[4]={0,0,0,0};
tot+=lb->n_elem;
loc_elem* le=lb->elem;
for(i=0;i<(int)lb->n_elem;i++,le++) {
if(le->loc!=loc || abs(le->dist)!=abs(dst)) {
/* New (loc,|dist|) key: close out the previous run. */
if(k) {
if(k>DUP_LIMIT) k=DUP_LIMIT+1;
else if(k>1) {
assert(xx<=DUP_LIMIT);
dcounts[0][xx]=kk[0];
dcounts[1][xx++]=kk[1];
for(k1=0;k1<4;k1++) kk[k1]=0;
/* Cross-tabulate within/between sub-groups of the run. */
for(k1=0;k1<xx;k1++) {
int k2;
for(k2=0;k2<2;k2++) {
int k3=dcounts[k2][k1];
if(k3>1) kk[0]+=k3*(k3-1);
}
kk[1]+=dcounts[0][k1]*dcounts[1][k1];
for(k2=0;k2<k1;k2++) {
kk[2]+=dcounts[0][k1]*dcounts[0][k2]+dcounts[1][k1]*dcounts[1][k2];
kk[3]+=dcounts[0][k1]*dcounts[1][k2]+dcounts[1][k1]*dcounts[0][k2];
}
}
kk[0]>>=1; /* ordered pairs -> unordered pairs */
for(k1=0;k1<4;k1++) dup_cnt[k1+1][k-1]+=kk[k1];
xx=0;
}
dup_cnt[0][k-1]++;
}
/* Start a new run with this element. */
k=1;
xx=0;
tile=le->tile;loc=le->loc;
dst=le->dist;
k1=(dst<0)?0:1; /* strand from the sign of the insert size */
kk[k1]=1;
kk[k1^1]=0;
} else {
k++;
if(le->tile!=tile) {
/* Same key, new tile: close the sub-group and start another. */
if(xx<DUP_LIMIT) {
dcounts[0][xx]=kk[0];
dcounts[1][xx++]=kk[1];;
tile=le->tile;
dst=le->dist;
k1=(dst<0)?0:1;
kk[k1]=1;
kk[k1^1]=0;
}
} else {
if(le->dist!=dst) {
k1=1;
dst=le->dist;
}
kk[k1]++;
}
}
}
/* Flush the final run of the block (same logic as above). */
if(k) {
if(k>DUP_LIMIT) k=DUP_LIMIT+1;
else if(k>1) {
assert(xx<=DUP_LIMIT);
dcounts[0][xx]=kk[0];
dcounts[1][xx++]=kk[1];
for(k1=0;k1<4;k1++) kk[k1]=0;
for(k1=0;k1<xx;k1++) {
int k2;
for(k2=0;k2<2;k2++) {
int k3=dcounts[k2][k1];
if(k3>1) kk[0]+=k3*(k3-1);
}
kk[1]+=dcounts[0][k1]*dcounts[1][k1];
for(k2=0;k2<k1;k2++) {
kk[2]+=dcounts[0][k1]*dcounts[0][k2]+dcounts[1][k1]*dcounts[1][k2];
kk[3]+=dcounts[0][k1]*dcounts[1][k2]+dcounts[1][k1]*dcounts[0][k2];
}
}
kk[0]>>=1;
for(k1=0;k1<4;k1++) dup_cnt[k1+1][k-1]+=kk[k1];
xx=0;
}
dup_cnt[0][k-1]++;
}
}
}
/* Reduce the histograms to overall duplicate-rate estimates. */
double z1,z2,z3,z4,z5,z6;
z1=z2=z3=z4=z5=z6=0.0;
//int k=0;
for(i=0;i<=DUP_LIMIT;i++) {
z1+=(double)dup_cnt[0][i];
z2+=(double)dup_cnt[0][i]*(i+1);
z3+=(double)dup_cnt[1][i];
z4+=(double)dup_cnt[2][i];
z5+=(double)dup_cnt[3][i];
z6+=(double)dup_cnt[4][i];
//if(dup_cnt[0][i]) k=i;
}
if(z2 && (z3+z4+z5+z6)) {
double z,z7,z8,z9;
z=1.0-z1/z2;
z1=(z3*z6>0.0)?(z3*(z5+z6)-z5*(z3+z4))/(z3*z6):1.0;
z2=z3*z1/(z3+z4+z5+z6);
z7=1.0-(1.0-z2)*z;
/* Fixed-point iteration for the unique-fragment (library size) estimate. */
z8=tot*1000.0;
z9=z8;
for(i=0;i<10000;i++) {
z8=tot*z7/(1.0-exp(log(1.0-1.0/z8)*tot));
if(fabs(z8-z9)<1.0e-2) break;
z9=z8;
}
stats->duplicate_rate[0]=z;
stats->duplicate_rate[1]=z2;
stats->duplicate_reads_used=tot;
stats->unique_fragment_estimate=(uint64_t)(z8+.5);
} else {
stats->duplicate_rate[0]=stats->duplicate_rate[1]=0.0;
stats->duplicate_reads_used=stats->unique_fragment_estimate=0;
}
return 0;
}
/* Thread entry point: merge the per-thread as_stats structures into
 * param->stats[0] and free the others.
 *
 * Steps:
 *   1. Grow stats[0]'s per-read arrays to the maximum read length seen by
 *      any thread, and its indel-length arrays to the maximum indel length.
 *   2. Accumulate all counters (yield, mapping, quality, mismatch, indel,
 *      paired-type, insert-size histogram) from threads 1..nt-1 into thread 0.
 *   3. Sort the merged insert-size hash by insert size.
 *
 * Takes an as_param* via `ss`; always returns 0 (pthread convention). */
static void *as_merge_stats(void *ss)
{
  uint64_t i,j,k,k1,nr,len[2],ins_size;
  as_param* param=ss;
  as_stats** st=param->stats;
  uint64_t nt=param->num_threads;
  bool paired=gt_input_generic_parser_attributes_is_paired(param->parser_attr);
  nr=paired?2:1;  // number of read ends to merge
  // Resize stats[0] to the longest read seen by any thread.
  for(j=0;j<nr;j++) {
    len[j]=st[0]->max_read_length[j];
    for(i=1;i<nt;i++) {
      if(st[i]->max_read_length[j]>len[j]) len[j]=st[i]->max_read_length[j];
    }
    if(len[j]>st[0]->max_read_length[j]) as_stats_resize(st[0],j,len[j]);
  }
  // Grow the indel-length histograms to the largest indel seen anywhere,
  // zeroing the newly exposed tail.
  ins_size=st[0]->max_indel_length;
  for(i=1;i<nt;i++) if(st[i]->max_indel_length>ins_size) ins_size=st[i]->max_indel_length;
  if(ins_size>st[0]->max_indel_length) {
    for(j=0;j<nr;j++) {
      int k;  // NOTE(review): deliberately shadows the outer uint64_t k
      for(k=0;k<2;k++) {
        st[0]->indel_length[j*2+k]=as_realloc(st[0]->indel_length[j*2+k],sizeof(uint64_t)*(ins_size+1));
        uint64_t sz;
        for(sz=st[0]->max_indel_length+1;sz<=ins_size;sz++) st[0]->indel_length[j*2+k][sz]=0;
      }
    }
  }
  // NOTE(review): st[0]->max_indel_length itself is never updated to ins_size
  // here — confirm downstream readers bound their loops by per-array sizes.
  for(i=1;i<nt;i++) {
    st[0]->nreads+=st[i]->nreads;
    for(j=0;j<nr;j++) {
      st[0]->yield[j]+=st[i]->yield[j];
      st[0]->mapped[j]+=st[i]->mapped[j];
      st[0]->unique[j]+=st[i]->unique[j];
      st[0]->ambiguous[j]+=st[i]->ambiguous[j];
      // Per-quality transition/transversion and previous-base stats.
      for(k=0;k<=MAX_QUAL;k++) {
        st[0]->ts_stats[j][k]+=st[i]->ts_stats[j][k];
        st[0]->tv_stats[j][k]+=st[i]->tv_stats[j][k];
        st[0]->pbc_stats[j][k]+=st[i]->pbc_stats[j][k];
        st[0]->pbn_stats[j][k]+=st[i]->pbn_stats[j][k];
      }
      len[j]=st[i]->max_read_length[j];
      uint64_t **bc0=st[0]->base_counts_by_cycle[j];
      uint64_t **bc1=st[i]->base_counts_by_cycle[j];
      if(st[i]->curr_read_store[j]) {
        if(st[i]->curr_read_store[j]>st[0]->curr_read_store[j])
          as_stats_resize(st[0],j,st[i]->curr_read_store[j]);
        // Read-length distribution and per-cycle base/quality counts.
        for(k=0;k<=len[j];k++) {
          st[0]->read_length_stats[j][k]+=st[i]->read_length_stats[j][k];
          for(k1=0;k1<5*(MAX_QUAL+1);k1++) bc0[k][k1]+=bc1[k][k1];
        }
        // Mismatch and quality counts, indexed by (end, quality) x cycle.
        for(k1=j*(MAX_QUAL+1);k1<(j+1)*(MAX_QUAL+1);k1++) {
          for(k=0;k<len[j];k++) {
            st[0]->mm_stats[k1][k]+=st[i]->mm_stats[k1][k];
            st[0]->qual_stats[k1][k]+=st[i]->qual_stats[k1][k];
          }
        }
        // Indel position and length histograms (two per end: ins/del).
        for(k1=j*2;k1<j*2+2;k1++) {
          for(k=0;k<len[j];k++) st[0]->indel_stats[k1][k]+=st[i]->indel_stats[k1][k];
          for(k=0;k<=st[i]->max_indel_length;k++) st[0]->indel_length[k1][k]+=st[i]->indel_length[k1][k];
        }
      }
    }
    for(j=0;j<3;j++) {
      st[0]->paired_type[j]+=st[i]->paired_type[j];
      st[0]->bis_stats[j]+=st[i]->bis_stats[j];
      st[0]->reads_with_splitmaps[j]+=st[i]->reads_with_splitmaps[j];
      st[0]->reads_only_with_splitmaps[j]+=st[i]->reads_only_with_splitmaps[j];
    }
    if(paired) {
      st[0]->paired_mapped+=st[i]->paired_mapped;
      st[0]->paired_unique+=st[i]->paired_unique;
      // Merge insert-size histograms: look up (or create) the matching
      // counter in stats[0] for each entry of stats[i].
      dist_element *de,*de1;
      for(de=st[i]->insert_size;de!=NULL;de=de->hh.next) {
        de1=as_find_insert_counter(&st[0]->insert_size,de->x);
        for(j=0;j<4;j++) de1->ct[j]+=de->ct[j];
      }
    }
    as_stats_free(st[i]);
  }
  HASH_SORT(st[0]->insert_size,cmp_dist_elem);
  return 0;
}
/* Print the "Yield summary" section to `f`: read counts, read lengths,
 * yield before/after trimming, no-call counts, and trimmed/clean yield.
 * Output format differs for paired vs single end and for fixed vs variable
 * read length. */
static void as_print_yield_summary(FILE *f,as_param *param)
{
  bool paired=gt_input_generic_parser_attributes_is_paired(param->parser_attr);
  as_stats* st=param->stats[0];
  uint64_t trimmed[2]={0,0},yield[2]={0,0},min_rl[2],i,j,k;
  fputs("Yield summary\n\n",f);
  j=paired?2:1;
  for(i=0;i<j;i++) {
    uint64_t l=st->max_read_length[i];
    // Skip leading zero bins to find the shortest observed read length.
    for(k=0;k<=l;k++) if(st->read_length_stats[i][k]) break;
    min_rl[i]=k;
    // trimmed = bases removed relative to the max length; yield = bases kept.
    for(;k<=l;k++) {
      uint64_t x=st->read_length_stats[i][k];
      trimmed[i]+=(l-k)*x;
      yield[i]+=k*x;
    }
  }
  // NOTE(review): percentages below divide by yield/trimmed totals — if a run
  // produced zero bases these print NaN/inf rather than guarding; confirm
  // whether that can occur in practice.
  if(paired) {
    fprintf(f,"Paired end reads. No. pairs =\t%" PRIu64 "\n",st->nreads);
    if(!param->variable_read_length) {
      fprintf(f,"Read lengths:\tRead 1 =\t%" PRIu64 "\tRead 2 =\t%" PRIu64 "\n",st->max_read_length[0],st->max_read_length[1]);
      fprintf(f,"Yield PF:\tRead 1 = \t%" PRIu64 "\tRead 2 = \t%" PRIu64 "\tTotal = \t%" PRIu64 "\n",yield[0]+trimmed[0],yield[1]+trimmed[1],yield[0]+yield[1]+trimmed[0]+trimmed[1]);
      fprintf(f,"Bases trimmed:\tRead 1 = \t%" PRIu64 "\t(%.2f%%)\tRead 2 = \t%" PRIu64 "\t(%.2f%%)\tTotal = \t%" PRIu64 "\t(%.2f%%)\n",
              trimmed[0],100.0*(double)trimmed[0]/(double)(yield[0]+trimmed[0]),
              trimmed[1],100.0*(double)trimmed[1]/(double)(yield[1]+trimmed[1]),
              trimmed[0]+trimmed[1],100.0*(double)(trimmed[0]+trimmed[1])/(double)(yield[0]+yield[1]+trimmed[0]+trimmed[1]));
    } else {
      // Variable-length runs report a min-max range instead of trimming.
      fprintf(f,"Read lengths:\tRead 1 =\t%" PRIu64 " - %" PRIu64 "\tRead 2 =\t%" PRIu64 " - %" PRIu64 "\n",min_rl[0],st->max_read_length[0],min_rl[1],st->max_read_length[1]);
      fprintf(f,"Yield PF:\tRead 1 =\t%" PRIu64 "\tRead 2 =\t%" PRIu64 "\tTotal =\t%" PRIu64 "\n",yield[0],yield[1],yield[0]+yield[1]);
    }
    // "No calls" = bases counted by length minus bases counted as called.
    fprintf(f,"No calls:\tRead 1 =\t%" PRIu64 "\t(%.2f%%)\tRead 2 =\t%" PRIu64 "\t(%.2f%%)\tTotal =\t%" PRIu64 "\t(%.2f%%)\n",
            yield[0]-st->yield[0],100.0*(double)(yield[0]-st->yield[0])/(double)yield[0],
            yield[1]-st->yield[1],100.0*(double)(yield[1]-st->yield[1])/(double)yield[1],
            yield[0]+yield[1]-st->yield[0]-st->yield[1],100.0*(double)(yield[0]+yield[1]-st->yield[0]-st->yield[1])/(double)(yield[0]+yield[1]));
    fprintf(f,"%s yield:\tRead 1 =\t%" PRIu64 "\t(%.2f%%)\tRead 2 =\t%" PRIu64 "\t(%.2f%%)\tTotal =\t%" PRIu64 "\t(%.2f%%)\n",param->variable_read_length?"Clean":"Trimmed",
            st->yield[0],100.0*(double)st->yield[0]/(double)(yield[0]+trimmed[0]),
            st->yield[1],100.0*(double)st->yield[1]/(double)(yield[1]+trimmed[1]),
            st->yield[0]+st->yield[1],100.0*(double)(st->yield[0]+st->yield[1])/(double)(yield[0]+yield[1]+trimmed[0]+trimmed[1]));
  } else {
    fprintf(f,"Single end reads. No. reads =\t%" PRIu64 "\n",st->nreads);
    if(!param->variable_read_length) {
      fprintf(f,"Read length:\t%" PRIu64 "\n",st->max_read_length[0]);
      fprintf(f,"Yield PF:\t%" PRIu64 "\n",yield[0]+trimmed[0]);
      fprintf(f,"Bases trimmed:\t=\t%" PRIu64 "\t(%.2f%%)\n",
              trimmed[0],100.0*(double)trimmed[0]/(double)(yield[0]+trimmed[0]));
    } else {
      fprintf(f,"Read length:\t%" PRIu64 " - %" PRIu64 "\n",min_rl[0],st->max_read_length[0]);
      fprintf(f,"Yield PF:\t%" PRIu64 "\n",yield[0]);
    }
    fprintf(f,"No calls =\t%" PRIu64 "\t(%.2f%%)\n",yield[0]-st->yield[0],100.0*(double)(yield[0]-st->yield[0])/(double)yield[0]);
    fprintf(f,"%s yield:\t%" PRIu64 "\t(%.2f%%)\n",param->variable_read_length?"Clean":"Trimmed",st->yield[0],100.0*(double)st->yield[0]/(double)(yield[0]+trimmed[0]));
  }
}
/* Print the mapping summary to `f`: per-end unique/multiple/unmapped/ambiguous
 * and splitmap counts; for paired data additionally the pair-level summary,
 * strand-orientation breakdown, and insert-size quartiles. */
static void as_print_mapping_summary(FILE *f,as_param *param)
{
  bool paired=gt_input_generic_parser_attributes_is_paired(param->parser_attr);
  as_stats *st=param->stats[0];
  bool paired_file=false; // Was the input file from a paired mapping
  if(paired==true && !param->input_files[1]) paired_file=true;
  // counts[0..3]: totals over the four insert-size histogram categories
  // (selected / all / recovered / split, per the distance-file header).
  uint64_t counts[4]={0,0,0,0};
  dist_element *de;
  int j;
  for(de=st->insert_size;de;de=de->hh.next) {
    for(j=0;j<4;j++) counts[j]+=de->ct[j];
  }
  double zcounts[4];
  for(j=0;j<4;j++) zcounts[j]=(double)counts[j];
  fputs("\nSingle end mapping summary\n\n",f);
  double z=(double)st->nreads;  // denominator for all percentages
  if(paired==true) {
    fprintf(f,"Uniquely mapping reads:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
            st->unique[0],100.0*(double)st->unique[0]/z,
            st->unique[1],100.0*(double)st->unique[1]/z,
            st->unique[0]+st->unique[1],100.0*(double)(st->unique[0]+st->unique[1])/(z+z));
    // multiple = mapped but neither unique nor ambiguous
    uint64_t mult[3];
    mult[0]=st->mapped[0]-st->unique[0]-st->ambiguous[0];
    mult[1]=st->mapped[1]-st->unique[1]-st->ambiguous[1];
    fprintf(f,"Multiply mapping reads:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
            mult[0],100.0*(double)mult[0]/z,
            mult[1],100.0*(double)mult[1]/z,
            mult[0]+mult[1],100.0*(double)(mult[0]+mult[1])/(z+z));
    uint64_t unmap[3];
    unmap[0]=st->nreads-st->mapped[0];
    unmap[1]=st->nreads-st->mapped[1];
    fprintf(f,"Unmapped reads:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
            unmap[0],100.0*(double)unmap[0]/z,
            unmap[1],100.0*(double)unmap[1]/z,
            unmap[0]+unmap[1],100.0*(double)(unmap[0]+unmap[1])/(z+z));
    // Ambiguity is only meaningful when the ends came from separate files.
    if(paired_file==false) {
      fprintf(f,"Ambiguous reads:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n\n",
              st->ambiguous[0],100.0*(double)st->ambiguous[0]/z,
              st->ambiguous[1],100.0*(double)st->ambiguous[1]/z,
              st->ambiguous[0]+st->ambiguous[1],100.0*(double)(st->ambiguous[0]+st->ambiguous[1])/(z+z));
    }
    fprintf(f,"Reads with splitmaps:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
            st->reads_with_splitmaps[0],100.0*(double)st->reads_with_splitmaps[0]/z,
            st->reads_with_splitmaps[1],100.0*(double)st->reads_with_splitmaps[1]/z,
            st->reads_with_splitmaps[0]+st->reads_with_splitmaps[1],100.0*(double)(st->reads_with_splitmaps[0]+st->reads_with_splitmaps[1])/(z+z));
    fprintf(f,"Reads with only splitmaps:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
            st->reads_only_with_splitmaps[0],100.0*(double)st->reads_only_with_splitmaps[0]/z,
            st->reads_only_with_splitmaps[1],100.0*(double)st->reads_only_with_splitmaps[1]/z,
            st->reads_only_with_splitmaps[0]+st->reads_only_with_splitmaps[1],100.0*(double)(st->reads_only_with_splitmaps[0]+st->reads_only_with_splitmaps[1])/(z+z));
    fputs("\nPaired end mapping summary\n\n",f);
    fprintf(f,"Uniquely mapping read pairs:\t%" PRIu64 "\t(%g%%)\n",st->paired_unique,100.0*(double)st->paired_unique/z);
    mult[2]=st->paired_mapped-st->paired_unique;
    fprintf(f,"Multiply mapping reads:\t%" PRIu64 "\t(%g%%)\n",mult[2],100.0*(double)mult[2]/z);
    unmap[2]=st->nreads-st->paired_mapped;
    fprintf(f,"Unmapped read pairs:\t%" PRIu64 "\t(%g%%)\n",unmap[2],100.0*(double)unmap[2]/z);
    fprintf(f,"Read pairs with splitmaps:\t%" PRIu64 "\t(%g%%)\n",st->reads_with_splitmaps[2],100.0*(double)st->reads_with_splitmaps[2]/z);
    fprintf(f,"Read pairs with only splitmaps:\t%" PRIu64 "\t(%g%%)\n",st->reads_only_with_splitmaps[2],100.0*(double)st->reads_only_with_splitmaps[2]/z);
    fputs("\nPair statistics (uniquely mapping read pairs only)\n\n",f);
    // Walk the (sorted) insert-size histogram once, recording Q1/median/Q3
    // for each of the four categories; `ct` counts categories completed.
    uint64_t cnt[4]={0,0,0,0};
    double lim[3]={.25,.5,.75};
    int state[4]={0,0,0,0};
    int64_t Q[3][4]={{0,0,0,0},{0,0,0,0},{0,0,0,0}};
    dist_element *de;  // NOTE(review): intentionally shadows the outer `de`
    int ct=0;
    for(de=st->insert_size;de && ct<4;de=de->hh.next) {
      int i;
      for(i=0;i<4;i++) {
        if(state[i]<3) {
          if((double)cnt[i]/zcounts[i]>=lim[state[i]]) {
            Q[state[i]++][i]=de->x;
            if(state[i]==3) ct++;
          }
        }
        cnt[i]+=de->ct[i];
      }
    }
    double ztot=(double)(st->paired_type[0]+st->paired_type[1]+st->paired_type[2]);
    fprintf(f,"Read pairs on different strand (DS):\t%" PRIu64 "\t(%g%%)\n",
            st->paired_type[PAIR_TYPE_DS],100.0*(double)st->paired_type[PAIR_TYPE_DS]/ztot);
    fprintf(f,"Read pairs on same strand (SS):\t%" PRIu64 "\t(%g%%)\n",
            st->paired_type[PAIR_TYPE_SS],100.0*(double)st->paired_type[PAIR_TYPE_SS]/ztot);
    fprintf(f,"Read pairs on different contigs:\t%" PRIu64 "\t(%g%%)\n",
            st->paired_type[PAIR_TYPE_MM],100.0*(double)st->paired_type[PAIR_TYPE_MM]/ztot);
    fputs("\nInsert size summary\n\n",f);
    fprintf(f,"Selected unique read pairs:\t(%g)\tQ1: %" PRId64 "\tMedian: %" PRId64 "\tQ3: %" PRId64 "\n",zcounts[0],Q[0][0],Q[1][0],Q[2][0]);
    fprintf(f,"All unique read pairs:\t(%g)\tQ1: %" PRId64 "\tMedian: %" PRId64 "\tQ3: %" PRId64 "\n",zcounts[1],Q[0][1],Q[1][1],Q[2][1]);
    fprintf(f,"Selected unique read pairs with recovered read:\t(%g)\tQ1: %" PRId64 "\tMedian: %" PRId64 "\tQ3: %" PRId64 "\n",zcounts[2],Q[0][2],Q[1][2],Q[2][2]);
    fprintf(f,"Selected unique read pairs with split reads:\t(%g)\tQ1: %" PRId64 "\tMedian: %" PRId64 "\tQ3: %" PRId64 "\n",zcounts[3],Q[0][3],Q[1][3],Q[2][3]);
  } else {
    fprintf(f,"Uniquely mapping reads:\t%" PRIu64 "\t(%g%%)\n",st->unique[0],100.0*(double)st->unique[0]/z);
    uint64_t mult=st->mapped[0]-st->unique[0]-st->ambiguous[0];
    fprintf(f,"Multiply mapping reads:\t%" PRIu64 "\t(%g%%)\n",mult,100.0*(double)mult/z);
    uint64_t unmap=st->nreads-st->mapped[0];
    fprintf(f,"Unmapped reads:\t%" PRIu64 "\t(%g%%)\n",unmap,100.0*(double)unmap/z);
    fprintf(f,"Ambiguous mapping reads:\t%" PRIu64 "\t(%g%%)\n\n",st->ambiguous[0],100.0*(double)st->ambiguous[0]/z);
    fprintf(f,"Reads with splitmaps:\t%" PRIu64 "\t(%g%%)\n",st->reads_with_splitmaps[0],100.0*(double)st->reads_with_splitmaps[0]/z);
    fprintf(f,"Reads with only splitmaps:\t%" PRIu64 "\t(%g%%)\n",st->reads_only_with_splitmaps[0],100.0*(double)st->reads_only_with_splitmaps[0]/z);
  }
}
/* Print the distribution of post-trimming read lengths to `f`.
 * For paired data both ends share one table (columns R1/R2); rows where all
 * counts are zero are suppressed.  Does nothing if no reads were seen. */
static void as_print_read_lengths(FILE *f,as_param *param)
{
  as_stats *st = param->stats[0];
  bool paired = gt_input_generic_parser_attributes_is_paired(param->parser_attr);

  if (!st->max_read_length[0]) return;
  fprintf(f,"\n\nDistribution of reads lengths after trimming\n");

  uint64_t n_ends, max_len;
  if (paired) {
    fputs("Read length\tR1:n_reads\tR1:p\tR2:nreads\tR2:p\n",f);
    n_ends = 2;
    max_len = st->max_read_length[0] > st->max_read_length[1] ? st->max_read_length[0] : st->max_read_length[1];
  } else {
    fputs("Read length\tn_reads\tp\n",f);
    n_ends = 1;
    max_len = st->max_read_length[0];
  }

  // Per-end totals, used to normalize counts into frequencies.
  double total[2] = {0.0, 0.0};
  uint64_t len, end;
  for (len = 0; len <= max_len; len++) {
    for (end = 0; end < n_ends; end++) {
      if (len <= st->max_read_length[end]) total[end] += (double)st->read_length_stats[end][len];
    }
  }

  uint64_t cnt[2];
  for (len = 0; len <= max_len; len++) {
    // Lengths beyond an end's own maximum contribute zero for that end.
    for (end = 0; end < n_ends; end++) cnt[end] = (len <= st->max_read_length[end] ? st->read_length_stats[end][len] : 0);
    if (paired) {
      if (cnt[0] || cnt[1]) {
        fprintf(f,"%" PRIu64 "\t%" PRIu64 "\t%.4f\t%" PRIu64 "\t%.4f\n",len,cnt[0],(double)cnt[0]/total[0],cnt[1],(double)cnt[1]/total[1]);
      }
    } else if (cnt[0]) {
      fprintf(f,"%" PRIu64 "\t%" PRIu64 "\t%.4f\n",len,cnt[0],(double)cnt[0]/total[0]);
    }
  }
}
/* Write the insert-size (fragment length) distribution to param->dist_file
 * (or stdout if unset), optionally compressed.  One row per observed insert
 * size with raw counts and per-category frequencies. */
static void as_print_distance_file(as_param *param)
{
  as_stats* st=param->stats[0];
  gt_output_file *file;
  if(param->dist_file) {
    file=gt_output_file_new_compress(param->dist_file,UNSORTED_FILE,param->compress);
  } else {
    file=gt_output_stream_new_compress(stdout,UNSORTED_FILE,param->compress);
  }
  gt_cond_fatal_error(!file,FILE_OPEN,param->dist_file);
  FILE *fp=file->file;
  // Column totals, used to normalize each row into frequencies.
  uint64_t counts[4]={0,0,0,0};
  dist_element *de;
  int j;
  for(de=st->insert_size;de;de=de->hh.next) {
    for(j=0;j<4;j++) counts[j]+=de->ct[j];
  }
  double zcounts[4];
  for(j=0;j<4;j++) zcounts[j]=(double)counts[j];
  fputs("Fragment size distribution (uniquely mapping reads):\n\n",fp);
  fputs("Size\tPaired\tAll\tRecovered\tSplit\tPaired_freq\tAll_freq\tRecovered_freq\tSplit_freq\n",fp);
  // NOTE(review): an empty category divides by zero below (prints nan) —
  // confirm whether an all-zero column can occur for real input.
  for(de=st->insert_size;de;de=de->hh.next) {
    fprintf(fp,"%" PRId64 "\t%" PRIu64 "\t%" PRIu64 "\t%" PRIu64 "\t%" PRIu64 "\t%g\t%g\t%g\t%g\n",
            de->x,de->ct[0],de->ct[1],de->ct[2],de->ct[3],
            (double)de->ct[0]/zcounts[0],(double)de->ct[1]/zcounts[1],(double)de->ct[2]/zcounts[2],(double)de->ct[3]/zcounts[3]);
  }
  gt_output_file_close(file);
}
/* Print the short duplicate-rate summary computed by as_calc_duplicate_rate:
 * overall duplicate percentage, optical-duplicate fraction, number of read
 * pairs used, and the estimated library size. */
static void as_print_duplicate_summary(FILE *fp,as_param *param)
{
  as_stats *stats = param->stats[0];

  fprintf(fp,"\nOverall duplicate percentage = %g%%\nOptical duplicate fraction = %g\nNo. read pairs used = %"PRIu64"\n",
          100.0*stats->duplicate_rate[0],
          stats->duplicate_rate[1],
          stats->duplicate_reads_used);
  fprintf(fp,"Estimated number of unique fragments in library = %"PRIu64"\n",
          stats->unique_fragment_estimate);
}
/* Print the per-multiplicity duplicate table: for each copy-count bin the
 * frequency, probability, and the four tile/strand pair counts.  The last
 * bin (DUP_LIMIT) is open-ended and printed with a ">=" prefix.  Nothing is
 * printed when only the first bin is populated. */
static void as_print_detailed_duplicate_report(FILE *fp,as_param *param)
{
  as_stats *st = param->stats[0];
  double total = 0.0;
  int last = 0;   // highest populated multiplicity bin
  int idx;

  for (idx = 0; idx <= DUP_LIMIT; idx++) {
    uint64_t n = st->duplicate_counts[0][idx];
    if (n) {
      total += (double)n;
      last = idx;
    }
  }
  if (!last) return;

  fputs("\nDetailed duplicate report\nN_copies\tfreq\tprob\tW++\tW+-\tB++\tB+-\n",fp);
  for (idx = 0; idx <= last; idx++) {
    if (idx == DUP_LIMIT) fputs(">=",fp);  // open-ended final bin
    fprintf(fp,"%d\t%"PRIu64"\t%g\t%"PRIu64"\t%"PRIu64"\t%"PRIu64"\t%"PRIu64"\n",
            idx+1,st->duplicate_counts[0][idx],(double)st->duplicate_counts[0][idx]/total,
            st->duplicate_counts[1][idx],st->duplicate_counts[2][idx],
            st->duplicate_counts[3][idx],st->duplicate_counts[4][idx]);
  }
}
/* Print the mismatch report to `fp`: overall mismatch rate, base composition,
 * ts/tv ratio and previous-base-copy probability, followed by per-quality and
 * per-cycle profiles for each read end.  Based on the first alignment only. */
static void as_print_mismatch_report(FILE *fp,as_param *param)
{
  const double ln10=log(10.0);
  as_stats* st=param->stats[0];
  bool paired=gt_input_generic_parser_attributes_is_paired(param->parser_attr);
  // Collect overall error stats
  // Per-end, per-quality totals collapsed over read cycles.
  uint64_t mm_stats[2][MAX_QUAL+1],qual_stats[2][MAX_QUAL+1];
  uint64_t mm_total[2]={0,0};
  uint64_t qtotal[2]={0,0},total[2]={0,0},tv_stats[2]={0,0},ts_stats[2]={0,0},pbc_stats[2]={0,0},pbn_stats[2]={0,0};
  // base_ct:   per-end totals for the 5 base codes (index 0 presumably N,
  //            1..4 = A,C,G,T judging by the composition printfs below).
  // base_ct1:  per-end, per-quality base totals (5 codes per quality).
  // base_ctt:  per-end, per-quality totals over all bases.
  uint64_t base_ct[2][5],base_ct1[2][5*(MAX_QUAL+1)],base_ctt[2][MAX_QUAL+1];
  uint64_t i,j,k,nr;
  nr=paired?2:1;
  for(i=0;i<nr;i++) {
    for(j=0;j<5;j++) base_ct[i][j]=0;
    for(j=0;j<=MAX_QUAL;j++) {
      uint64_t *tp=st->mm_stats[i*(MAX_QUAL+1)+j];
      uint64_t *tq=st->qual_stats[i*(MAX_QUAL+1)+j];
      uint64_t tmp_mm=0,tmp=0;
      uint64_t tt[]={0,0,0,0,0};
      // Sum over read cycles for this (end, quality) pair.
      for(k=0;k<st->max_read_length[i];k++) {
        uint64_t *tb=st->base_counts_by_cycle[i][k]+j*5;
        int k1;
        for(k1=0;k1<5;k1++) tt[k1]+=tb[k1];
        tmp_mm+=tp[k];
        tmp+=tq[k];
      }
      uint64_t tt1=0;
      for(k=0;k<5;k++) {
        base_ct[i][k]+=tt[k];
        base_ct1[i][j*5+k]=tt[k];
        tt1+=tt[k];
      }
      base_ctt[i][j]=tt1;
      tv_stats[i]+=st->tv_stats[i][j];
      ts_stats[i]+=st->ts_stats[i][j];
      pbc_stats[i]+=st->pbc_stats[i][j];
      pbn_stats[i]+=st->pbn_stats[i][j];
      mm_stats[i][j]=tmp_mm;
      qual_stats[i][j]=tmp;
      mm_total[i]+=tmp_mm;
      qtotal[i]+=tmp;
    }
    for(j=0;j<5;j++) total[i]+=base_ct[i][j];
  }
  fputs("\nMismatch report (based on first alignment only)\n\n",fp);
  // NOTE(review): the overall ratios below divide by qtotal/total/tv/pbc+pbn
  // without a zero guard — confirm empty inputs cannot reach here.
  if(paired) {
    fprintf(fp,"Overall Mismatch percentage (Read 1 Read 2):\t%g%%\t%g%%\n",100.0*mm_total[0]/qtotal[0],100.0*mm_total[1]/qtotal[1]);
    fprintf(fp,"Overall base composition:\t(A:%.3f,C:%.3f,G:%.3f,T:%.3f,N:%.3f)\t(A:%.3f,C:%.3f,G:%.3f,T:%.3f,N:%.3f)\n",
            (double)base_ct[0][1]/total[0],(double)base_ct[0][2]/total[0],(double)base_ct[0][3]/total[0],(double)base_ct[0][4]/total[0],(double)base_ct[0][0]/total[0],
            (double)base_ct[1][1]/total[1],(double)base_ct[1][2]/total[1],(double)base_ct[1][3]/total[1],(double)base_ct[1][4]/total[1]);
    fprintf(fp,"Overall transition:transversion ratio:\t%g\t%g\n",(double)ts_stats[0]/tv_stats[0],(double)ts_stats[1]/tv_stats[1]);
    fprintf(fp,"Overall probability of mismatch being a copy of previous base:\t%g\t%g\n",(double)pbc_stats[0]/(pbc_stats[0]+pbn_stats[0]),(double)pbc_stats[1]/(pbc_stats[1]+pbn_stats[1]));
  } else {
    fprintf(fp,"Overall mismatch percentage:\t%g%%\n",100.0*mm_total[0]/qtotal[0]);
    fprintf(fp,"Overall base composition:\t(A:%.3f,C:%.3f,G:%.3f,T:%.3f,N:%.3f)\n",
            (double)base_ct[0][1]/total[0],(double)base_ct[0][2]/total[0],(double)base_ct[0][3]/total[0],(double)base_ct[0][4]/total[0],(double)base_ct[0][0]/total[0]);
    fprintf(fp,"Overall transition:transversion ratio:\t%g\n",(double)ts_stats[0]/tv_stats[0]);
    fprintf(fp,"Overall probability of mismatch being a copy of previous base:\t%g\n",(double)pbc_stats[0]/(pbc_stats[0]+pbn_stats[0]));
  }
  for(i=0;i<nr;i++) {
    // Quality profile: one row per quality value with bases, mismatch rate,
    // phred-style -log10(p), ts:tv, copy-probability and base composition.
    fprintf(fp,"\nMismatch quality profile - Read %"PRIu64"\n\n",i+1);
    fputs("Qual\tn_bases\tp(bases)\tcp(bases)\tn_mm\tp(mm)\t-log10_p(mm)\tts:tv\tp(pbc)\tp(A)\tp(C)\tp(G)\tp(T)\n",fp);
    uint64_t ttot=0;  // cumulative base count for the cp(bases) column
    for(j=0;j<=MAX_QUAL;j++) {
      if(qual_stats[i][j]) {
        double z=(double)mm_stats[i][j]/qual_stats[i][j];
        ttot+=qual_stats[i][j];
        uint64_t tt=st->pbc_stats[i][j]+st->pbn_stats[i][j];
        fprintf(fp,"%"PRIu64"\t%"PRIu64"\t%g\t%g\t%"PRIu64"\t%g\t%g\t%g\t%g\t%g\t%g\t%g\t%g\n",j,qual_stats[i][j],(double)qual_stats[i][j]/qtotal[i],
                (double)ttot/qtotal[i],mm_stats[i][j],z,-log(z)/ln10,st->tv_stats[i][j]?(double)st->ts_stats[i][j]/st->tv_stats[i][j]:0.0,
                tt?(double)st->pbc_stats[i][j]/tt:0.0,(double)base_ct1[i][j*5+1]/base_ctt[i][j],(double)base_ct1[i][j*5+2]/base_ctt[i][j],
                (double)base_ct1[i][j*5+3]/base_ctt[i][j],(double)base_ct1[i][j*5+4]/base_ctt[i][j]);
      }
    }
    // Position profile: one row per read cycle with mean quality, mismatch
    // rate and base composition at that cycle.
    fprintf(fp,"\nMismatch read position profile - Read %"PRIu64"\n\n",i+1);
    fputs("Pos\tn_bases\tp(bases)\tavg_qual\tn_mm\tp(mm)\tp(A)\tp(C)\tp(G)\tp(T)\tp(N)\n",fp);
    uint64_t len=st->max_read_length[i];
    for(j=0;j<len;j++) {
      uint64_t mms[5]={0,0,0,0,0};
      uint64_t *bc=st->base_counts_by_cycle[i][j];
      uint64_t qs=0,mm=0;
      double qsmn=0.0;  // quality-weighted sum, becomes the mean below
      for(k=0;k<=MAX_QUAL;k++) {
        mm+=st->mm_stats[i*(MAX_QUAL+1)+k][j];
        qs+=st->qual_stats[i*(MAX_QUAL+1)+k][j];
        qsmn+=(double)k*st->qual_stats[i*(MAX_QUAL+1)+k][j];
        int k1;
        for(k1=0;k1<5;k1++) mms[k1]+=bc[k*5+k1];
      }
      if(qs) {
        qsmn/=(double)qs;
        uint64_t tt=0;
        for(k=0;k<5;k++) tt+=mms[k];
        fprintf(fp,"%"PRIu64"\t%"PRIu64"\t%g\t%.1f\t%"PRIu64"\t%g\t%g\t%g\t%g\t%g\t%g\n",j,qs,(double)qs/qtotal[i],qsmn,mm,(double)mm/qs,
                (double)mms[1]/tt,(double)mms[2]/tt,(double)mms[3]/tt,(double)mms[4]/tt,(double)mms[0]/tt);
      }
    }
  }
}
/* Open the report destination (param->output_file, or stdout when unset,
 * optionally compressed) and emit every report section in order.  The
 * insert-size distance file is written separately, and only for paired data. */
static void as_print_stats(as_param *param)
{
  gt_output_file *out = param->output_file
    ? gt_output_file_new_compress(param->output_file,UNSORTED_FILE,param->compress)
    : gt_output_stream_new_compress(stdout,UNSORTED_FILE,param->compress);
  gt_cond_fatal_error(!out,FILE_OPEN,param->output_file);

  FILE *fp = out->file;
  if (gt_input_generic_parser_attributes_is_paired(param->parser_attr))
    as_print_distance_file(param);
  as_print_yield_summary(fp,param);
  as_print_mapping_summary(fp,param);
  as_print_duplicate_summary(fp,param);
  as_print_mismatch_report(fp,param);
  as_print_read_lengths(fp,param);
  as_print_detailed_duplicate_report(fp,param);
  gt_output_file_close(out);
}
int main(int argc,char *argv[])
{
int err=0,c;
char *p,*p1;
static struct option longopts[]={
{"reads",required_argument,0,'r'},
{"insert_dist",required_argument,0,'d'},
{"max_insert",required_argument,0,'m'},
{"min_insert",required_argument,0,'M'},
{"insert_dist",required_argument,0,'d'},
{"phage_lambda",required_argument,0,'P'},
{"phix174",required_argument,0,'X'},
{"paired",no_argument,0,'p'},
{"variable",no_argument,0,'V'},
{"ignore_id",no_argument,0,'i'},
{"mmap",no_argument,0,'w'},
{"fastq",no_argument,0,'F'},
{"solexa",no_argument,0,'S'},
{"gzip",no_argument,0,'z'},
{"bzip2",no_argument,0,'j'},
{"no-compress",no_argument,0,'Z'},
{"threads",required_argument,0,'t'},
{"qual_off",required_argument,0,'q'},
{"output",required_argument,0,'o'},
{"read_length",required_argument,0,'l'},
{"max_read_length",required_argument,0,'L'},
{"help",no_argument,0,'h'},
{"usage",no_argument,0,'h'},
{0,0,0,0}
};
as_param param = {
.input_files={NULL,NULL},
.output_file=NULL,
.dist_file=NULL,
.phage_lambda=NULL,
.phix174=NULL,
.mmap_input=false,
.parser_attr=gt_input_generic_parser_attributes_new(false),
.ignore_id=false,
.compress=NONE,
.min_insert=0,
.max_insert=DEFAULT_MAX_INSERT,
.variable_read_length=false,
.read_length={0,0},
.max_read_length=MAX_READ_LENGTH,
.num_threads=1,
.qual_offset=DEFAULT_QUAL_OFFSET,
};
while(!err && (c=getopt_long(argc,argv,"d:t:r:o:q:m:M:l:L:x:P:X:FSVzjZwpi?",longopts,0))!=-1) {
switch(c) {
case 'd':
set_opt("insert_dist",¶m.dist_file,optarg);
break;
case 'p':
gt_input_generic_parser_attributes_set_paired(param.parser_attr,true);
break;
case 'o':
set_opt("output",¶m.output_file,optarg);
break;
case 'P':
set_opt("phage_lambda",¶m.phage_lambda,optarg);
break;
case 'X':
set_opt("phix174",¶m.phix174,optarg);
break;
case 'L':
param.max_read_length=(uint64_t)strtoul(optarg,&p,10);
break;
case 'l':
param.read_length[0]=(uint64_t)strtoul(optarg,&p,10);
if(*p==',') param.read_length[1]=(uint64_t)strtoul(p+1,&p1,10);
else param.read_length[1]=param.read_length[0];
break;
case 'z':
#ifdef HAVE_ZLIB
param.compress=GZIP;
#endif
break;
case 'j':
#ifdef HAVE_BZLIB
param.compress=BZIP2;
#endif
break;
case 'Z':
param.compress=NONE;
break;
case 'q':
param.qual_offset=(int)strtol(optarg,&p,10);
if(*p || param.qual_offset<0 || param.qual_offset>255) {
fprintf(stderr,"Illegal quality value adjustment: '%s'\n",optarg);
err=-7;
}
break;
case 'm':
param.max_insert=(uint64_t)strtoul(optarg,&p,10);
break;
case 'M':
param.min_insert=(uint64_t)strtoul(optarg,&p,10);
break;
case 'F':
param.qual_offset=QUAL_FASTQ;
break;
case 'S':
param.qual_offset=QUAL_SOLEXA;
break;
case 'V':
param.variable_read_length=true;
break;
case 'w':
param.mmap_input=true;
break;
case 'i':
param.ignore_id=true;
break;
case 't':
#ifdef HAVE_OPENMP
param.num_threads=atoi(optarg);
#endif
break;
case 'r':
if(param.input_files[0]) {
fprintf(stderr,"multiple reads options: '%s' overwriting previous definition\n",optarg);
param.input_files[0]=0;
param.input_files[1]=0;
}
p=strchr(optarg,',');
if(p) {
*p++=0;
if(strchr(p,',')) {
fprintf(stderr,"Alignment files should be specified either in comma separated pairs (paired end) or individually (single end or paired alignment)\n");
err=-10;
} else {
param.input_files[0]=strdup(optarg);
param.input_files[1]=strdup(p);
}
} else {
param.input_files[0]=strdup(optarg);
}
break;
fprintf(stderr,"Alignment files should be specified either in comma separated pairs (paired end) or individually (single end or paired alignment)\n");
break;
case 'h':
case '?':
usage(stdout);
exit(0);
}
}
if(!param.phage_lambda) param.phage_lambda=strdup(PHAGE_LAMBDA);
if(!param.phix174) param.phix174=strdup(PHIX174);
as_set_output_files(¶m);
as_stats** stats=as_malloc(param.num_threads*sizeof(void *));
param.stats=stats;
loc_hash *lh=0;
// Do we have two map files as input (one for each read)?
if(param.input_files[1]) {
gt_input_generic_parser_attributes_set_paired(param.parser_attr,true);
pthread_mutex_t mutex=PTHREAD_MUTEX_INITIALIZER;
gt_input_file* input_file1=gt_input_file_open(param.input_files[0],param.mmap_input);
gt_input_file* input_file2=gt_input_file_open(param.input_files[1],param.mmap_input);
if(input_file1->file_format!=MAP || input_file2->file_format!=MAP) {
gt_fatal_error_msg("Fatal error: paired files '%s','%s' are not in MAP format\n",param.input_files[0],param.input_files[1]);
}
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(param.num_threads)
{
uint64_t tid=omp_get_thread_num();
#else
{
uint64_t tid=0;
#endif
gt_buffered_input_file* buffered_input1=gt_buffered_input_file_new(input_file1);
gt_buffered_input_file* buffered_input2=gt_buffered_input_file_new(input_file2);
gt_status error_code;
gt_template *template=gt_template_new();
id_tag *idt=new_id_tag();
stats[tid]=as_stats_new(gt_input_generic_parser_attributes_is_paired(param.parser_attr));
stats[tid]->loc_hash=&lh;
while(gt_input_map_parser_synch_blocks(buffered_input1,buffered_input2,&mutex)) {
error_code=gt_input_map_parser_get_template(buffered_input1,template,NULL);
if(error_code!=GT_IMP_OK) {
gt_input_map_parser_get_template(buffered_input2,template,NULL);
gt_error_msg("Error parsing file '%s'\n",param.input_files[0]);
continue;
}
if(gt_template_get_num_blocks(template)!=1) {
gt_error_msg("Error parsing files '%s','%s': wrong number of blocks\n",param.input_files[0],param.input_files[1]);
continue;
}
gt_alignment *alignment2=gt_template_get_block_dyn(template,1);
error_code=gt_input_map_parser_get_alignment(buffered_input2,alignment2,NULL);
if (error_code!=GT_IMP_OK) {
gt_error_msg("Error parsing file '%s'\n",param.input_files[1]);
continue;
}
if(!(gt_string_nequals(template->tag,alignment2->tag,gt_string_get_length(template->tag)))) {
gt_error_msg("Fatal ID mismatch ('%*s','%*s') parsing files '%s','%s'\n",PRIgts_content(template->tag),PRIgts_content(alignment2->tag),param.input_files[0],param.input_files[1]);
break;
}
if(!param.ignore_id) {
uint64_t idt_err=parse_id_tag(template->tag,idt);
if(idt_err!=ID_TAG_OK) {
gt_error_msg("Fatal error parsing ID '"PRIgts"'\n",PRIgts_content(template->tag));
break;
}
}
gt_alignment *alignment1=gt_template_get_block(template,0);
gt_mmap_attributes attr;
gt_map *mmap[2];
GT_ALIGNMENT_ITERATE(alignment1,map1) {
mmap[0]=map1;
GT_ALIGNMENT_ITERATE(alignment2,map2) {
mmap[1]=map2;
gt_status gt_err;
int64_t x=gt_template_get_insert_size(mmap,>_err,0,0);
if(gt_err==GT_TEMPLATE_INSERT_SIZE_OK && x>=param.min_insert && x<=param.max_insert) {
attr.distance=gt_map_get_global_distance(map1)+gt_map_get_global_distance(map2);
attr.gt_score=GT_MAP_NO_GT_SCORE;
gt_template_inc_counter(template,attr.distance);
gt_template_add_mmap_ends(template,map1,map2,&attr);
}
}
}
as_collect_stats(template,stats[tid],¶m,idt);
}
gt_template_delete(template);
gt_buffered_input_file_close(buffered_input1);
gt_buffered_input_file_close(buffered_input2);
free_id_tag(idt);
}
gt_input_file_close(input_file1);
gt_input_file_close(input_file2);
} else { // Single file (could be single or paired end)
gt_input_file* input_file=param.input_files[0]?gt_input_file_open(param.input_files[0],param.mmap_input):gt_input_stream_open(stdin);
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(param.num_threads)
{
uint64_t tid=omp_get_thread_num();
#else
{
uint64_t tid=0;
#endif
gt_buffered_input_file* buffered_input=gt_buffered_input_file_new(input_file);
gt_status error_code;
gt_template *template=gt_template_new();
stats[tid]=as_stats_new(gt_input_generic_parser_attributes_is_paired(param.parser_attr));
stats[tid]->loc_hash=&lh;
id_tag *idt=new_id_tag();
while ((error_code=gt_input_generic_parser_get_template(buffered_input,template,param.parser_attr))) {
if (error_code!=GT_IMP_OK) {
gt_error_msg("Error parsing file '%s'\n",param.input_files[0]);
continue;
}
// For paired reads, insert single end mappings into alignments
if(gt_input_generic_parser_attributes_is_paired(param.parser_attr)) {
if(gt_template_get_num_blocks(template)!=2) {
gt_fatal_error_msg("Fatal error: Expecting paired reads\n");
}
gt_alignment *al[2];
al[0]=gt_template_get_block(template,0);
al[1]=gt_template_get_block(template,1);
gt_alignment_recalculate_counters(al[0]);
gt_alignment_recalculate_counters(al[1]);
}
if(!param.ignore_id) {
uint64_t idt_err=parse_id_tag(template->tag,idt);
if(idt_err!=ID_TAG_OK) {
gt_error_msg("Fatal error parsing ID '"PRIgts"'\n",PRIgts_content(template->tag));
break;
}
}
as_collect_stats(template,stats[tid],¶m,idt);
}
// Clean
gt_template_delete(template);
gt_buffered_input_file_close(buffered_input);
free_id_tag(idt);
}
gt_input_file_close(input_file);
}
pthread_t stats_merge;
if(pthread_create(&stats_merge,NULL,as_merge_stats,¶m)) {
gt_error_msg("Fatal error - could not create new thread\n");
exit(-1);
}
pthread_t calc_dup;
if(pthread_create(&calc_dup,NULL,as_calc_duplicate_rate,¶m)) {
gt_error_msg("Fatal error - could not create new thread\n");
exit(-1);
}
pthread_join(calc_dup,NULL);
pthread_join(stats_merge,NULL);
as_print_stats(¶m);
as_stats_free(stats[0]);
free(stats);
return err;
}
|
test.c | #include <stdio.h>
#define N (8*1024)
#define C 4
#define P 2
#define M (N/C)
#define TEST_PAR_NOWAIT 1
#define TEST_PAR_MAP_ALL 1
#pragma omp requires unified_shared_memory
int a[N], b[N];
/* OpenMP target-offload test driver.
 * Runs two variants of a chunked a[j] = b[j]+1 kernel using `target nowait`
 * tasks launched from a host `parallel for` (TEST_PAR_NOWAIT maps each chunk
 * with `always`; TEST_PAR_MAP_ALL maps all of b per task) and verifies the
 * results on the host.  Returns the number of failing variants.
 * Fix: "conpleted" typo in the status messages corrected to "completed". */
int main() {
  int i;
  int error, totError = 0;
#if TEST_PAR_NOWAIT
  for (i=0; i<N; i++) a[i] = b[i] = i;
  #pragma omp target data map(alloc: a, b)
  {
    #pragma omp parallel for num_threads(P)
    for(int i=0; i<C; i++) {
      int lb = i*M;
      int ub = (i+1)*M;
      #pragma omp target nowait map(always to: b[lb:M]) map(always from: a[lb:M])
      {
        for(int j=lb; j<ub; j++) a[j] = b[j]+1;
      }
    } // par for
  } // data map
  error=0;
  for (i=0; i<N; i++) {
    if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
  }
  if (! error) {
    printf("  test with TEST_PAR_NOWAIT completed successfully\n");
  } else {
    printf("  test with TEST_PAR_NOWAIT completed with %d error(s)\n", error);
    totError++;
  }
#endif
#if TEST_PAR_MAP_ALL
  // map all
  for (i=0; i<N; i++) a[i] = b[i] = i;
  #pragma omp parallel for num_threads(P)
  for(int i=0; i<C; i++) {
    int lb = i*M;
    int ub = (i+1)*M;
    #pragma omp target nowait map(to: b[0:N]) map(always from: a[lb:M])
    {
      for(int j=lb; j<ub; j++) a[j] = b[j]+1;
    }
  } // par for
  error=0;
  for (i=0; i<N; i++) {
    if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
  }
  if (! error) {
    printf("  test with TEST_PAR_MAP_ALL completed successfully\n");
  } else {
    printf("  test with TEST_PAR_MAP_ALL completed with %d error(s)\n", error);
    totError++;
  }
#endif
  printf("completed with %d errors\n", totError);
  return totError;
}
|
mandel_mpi.c | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include <mpi.h>
#include <omp.h>
#include "pngwriter.h"
#include "consts.h"
#define index(x, y, lda) (y*lda + x)
unsigned long get_time ()
{
struct timeval tp;
gettimeofday (&tp, NULL);
return tp.tv_sec * 1000000 + tp.tv_usec;
}
/* Distributed Mandelbrot renderer: each MPI rank computes its own tile of
 * the image (OpenMP-parallel over rows) and the master assembles and
 * writes the final PNG. */
int main (int argc, char** argv)
{
  // Initialize MPI
  MPI_Init(&argc, &argv);
  int mpi_rank, mpi_size;
  MPI_Status status;
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
  // Partition the image over a 2D grid of processes and derive this
  // rank's local pixel domain.
  Partition p = createPartition(mpi_rank, mpi_size);
  Domain d = createDomain(p);
  if(mpi_rank == 0) printf("Processor grid size (%d, %d)\n", p.nx, p.ny);
  printf("[Process %d]: Coordinates [%d, %d]\n", mpi_rank, p.x, p.y);
  printf("[Process %d] Domain X: %d -> %d\n", mpi_rank, d.startx, d.endx);
  printf("[Process %d] Domain Y: %d -> %d\n", mpi_rank, d.starty, d.endy);
  /****************************************************************************/
  // The PNG image exists only on the MASTER.
  png_data* pPng = NULL;
  if (mpi_rank == 0)
  {
    pPng = png_create (IMAGE_WIDTH, IMAGE_HEIGHT);
  }
  // Global complex-plane step per pixel.
  double fDeltaX = (MAX_X - MIN_X) / (double) IMAGE_WIDTH;
  double fDeltaY = (MAX_Y - MIN_Y) / (double) IMAGE_HEIGHT;
  // Local image buffer.  The master's buffer is padded so it can also
  // receive the (possibly larger) edge partitions of the other ranks.
  int *c;
  if (mpi_rank == 0)
  {
    int extrax = IMAGE_WIDTH % p.nx;
    int extray = IMAGE_HEIGHT % p.ny;
    c = malloc((d.nx + extrax) * (d.ny + extray) * sizeof(int));
  } else
  {
    c = malloc(d.nx*d.ny*sizeof(int));
  }
  /****************************************************************************/
  // do the calculation
  long nTotalIterationsCount = 0;
  long i, j;
  unsigned long nTimeStart = get_time ();
  /* BUG FIX: the original carried cx/cy across iterations (cx += fDeltaX,
   * cy += fDeltaY) while parallelizing the row loop, so every thread but
   * the one owning row 0 started from a wrong cy; it also listed an
   * undeclared 'n' in the private clause and raced on
   * nTotalIterationsCount.  Rows now derive their coordinates from the
   * loop indices (independent iterations) and the iteration counter uses
   * an OpenMP reduction. */
#pragma omp parallel for reduction(+:nTotalIterationsCount)
  for (long row = 0; row < d.ny; row++) // HEIGHT
  {
    double cy = MIN_Y + (d.starty + row) * fDeltaY;
    double cx = MIN_X + d.startx * fDeltaX;
    for (long col = 0; col < d.nx; col++) // WIDTH
    {
      double x = cx;
      double y = cy;
      double x2 = x * x;
      double y2 = y * y;
      // compute the orbit z, f(z), f²(z), f³(z), ...
      // count the iterations until the orbit leaves the circle |z|=2.
      // stop if the number of iterations exceeds the bound MAX_ITERS.
      int n = 0;
      for ( ; x2 + y2 < 4 && n < MAX_ITERS; n++, nTotalIterationsCount++)
      {
        // z = z² + c, where z = x + iy, c = cx + icy
        y = 2 * x * y + cy;
        x = x2 - y2 + cx;
        x2 = x * x;
        y2 = y * y;
      }
      // map the escape count to a 0..255 gray value
      c[index(col,row,d.nx)] = ((long) n * 255) / MAX_ITERS;
      cx += fDeltaX;
    }
  }
  unsigned long nTimeEnd = get_time ();
  printf ("[Process %d] Total time: %g ms\n", mpi_rank, (nTimeEnd - nTimeStart) / 1000.0);
  printf ("[Process %d] Image size: %ld x %ld = %ld Pixels\n", mpi_rank, (long) d.nx, (long) d.ny, (long) (d.nx * d.ny));
  printf ("[Process %d] Total number of iterations: %ld\n", mpi_rank, nTotalIterationsCount);
  printf ("[Process %d] Avg. time per pixel: %g µs\n", mpi_rank, (nTimeEnd - nTimeStart) / (double) (d.nx * d.ny));
  printf ("[Process %d] Avg. time per iteration: %g µs\n", mpi_rank, (nTimeEnd - nTimeStart) / (double) nTotalIterationsCount);
  printf ("[Process %d] Iterations/second: %g\n", mpi_rank, nTotalIterationsCount / (double) (nTimeEnd - nTimeStart) * 1e6);
  // assume there are 8 floating point operations per iteration
  printf ("[Process %d] MFlop/s: %g\n", mpi_rank, nTotalIterationsCount * 8.0 / (double) (nTimeEnd - nTimeStart));
  // Send the data to the master
  if (mpi_rank != 0)
  {
    // Ship the local partition to the master (synchronous send).
    MPI_Ssend(c, d.ny * d.nx, MPI_INT, 0, 0, MPI_COMM_WORLD);
  }
  /****************************************************************************/
  // Write the image
  if (mpi_rank == 0)
  {
    // first write master's own data
    for (j = 0; j < d.ny; j++) // HEIGHT
    {
      for (i = 0; i < d.nx; i++) // WIDTH
      {
        int c_ij = c[index(i,j,d.nx)];
        png_plot(pPng, i+d.startx, j+d.starty, c_ij ,c_ij, c_ij);
      }
    }
    // receive and write the data from the other processes
    for (int proc = 1; proc < mpi_size; proc++)
    {
      Partition p1 = updatePartition(p, proc);
      Domain d1 = createDomain(p1);
      /* BUG FIX: receive the sender's element count (d1.nx*d1.ny), not
       * the master's own (d.nx*d.ny) — edge partitions can differ in
       * size, which would truncate or mismatch the message. */
      MPI_Recv(c, d1.ny * d1.nx, MPI_INT, proc, 0, MPI_COMM_WORLD, &status);
      // write the partition of the process proc
      for (j = 0; j < d1.ny; j++) // HEIGHT
      {
        for (i = 0; i < d1.nx; i++) // WIDTH
        {
          int c_ij = c[index(i,j,d1.nx)];
          png_plot (pPng, i+d1.startx, j+d1.starty, c_ij ,c_ij, c_ij);
        }
      }
    }
    png_write (pPng, "mandel.png");
  }
  MPI_Comm_free(&p.comm);
  free(c);
  MPI_Finalize();
  return 0;
}
|
hermv_c_csc_n_lo.c | #include "alphasparse/kernel.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "alphasparse/util.h"
#include <memory.h>
/* Hermitian sparse matrix-vector product y = alpha*A*x + beta*y, where A is
 * stored as the lower triangle of an n x n Hermitian matrix in CSC format.
 * Each stored entry A(r,c) with r > c contributes both directly to y[r] and
 * (conjugated via alpha_mul_3c — presumably the conjugate multiply; verify
 * against the macro definition) to y[c].  Threads accumulate into private
 * y_local buffers to avoid races, followed by a final reduction into y.
 * The inner loop is unrolled by 4. */
static alphasparse_status_t
hermv_csc_n_lo_unroll(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
// m == n
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
const ALPHA_INT num_threads = alpha_get_thread_num();
/* Scale the output first: y = beta * y. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], y[i], beta);
}
/* One zero-initialized private accumulator vector per thread. */
ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < num_threads; i++)
{
y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
}
/* Main sweep: one column of the stored lower triangle per iteration. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_INT ais = A->cols_start[i];
ALPHA_INT aie = A->cols_end[i];
ALPHA_INT ail = aie - ais;
/* Absolute index of the first entry with row >= i (diagonal or below). */
ALPHA_INT start = alpha_lower_bound(&A->row_indx[ais], &A->row_indx[aie], i) - A->row_indx;
/* Diagonal entry (if present) contributes alpha * A(i,i) * x[i] once. */
if(start < aie && A->row_indx[start] == i){
ALPHA_Number tmp;
alpha_mul(tmp, alpha, A->values[start]);
alpha_madde(y_local[tid][i], tmp, x[i]);
start += 1;
}
const ALPHA_INT* A_row = &A->row_indx[ais];
const ALPHA_Number* A_val = &A->values[ais];
ALPHA_INT ai = start - ais;  /* column-local offset of the first strictly-lower entry */
ALPHA_Number alpha_xi, tmp;
alpha_mul(alpha_xi, alpha, x[i]);
/* 4-way unrolled loop over strictly-lower entries: each A(r,i) updates
 * y[r] += alpha*x[i]*A(r,i) and y[i] += alpha*conj(A(r,i))*x[r]. */
for(; ai < ail-3; ai+=4)
{
ALPHA_Number av0 = A_val[ai];
ALPHA_Number av1 = A_val[ai + 1];
ALPHA_Number av2 = A_val[ai + 2];
ALPHA_Number av3 = A_val[ai + 3];
ALPHA_INT ar0 = A_row[ai];
ALPHA_INT ar1 = A_row[ai + 1];
ALPHA_INT ar2 = A_row[ai + 2];
ALPHA_INT ar3 = A_row[ai + 3];
alpha_madde(y_local[tid][ar0], av0, alpha_xi);
alpha_madde(y_local[tid][ar1], av1, alpha_xi);
alpha_madde(y_local[tid][ar2], av2, alpha_xi);
alpha_madde(y_local[tid][ar3], av3, alpha_xi);
alpha_mul_3c(tmp, alpha, av0);
alpha_madde(y_local[tid][i], tmp, x[ar0]);
alpha_mul_3c(tmp, alpha, av1);
alpha_madde(y_local[tid][i], tmp, x[ar1]);
alpha_mul_3c(tmp, alpha, av2);
alpha_madde(y_local[tid][i], tmp, x[ar2]);
alpha_mul_3c(tmp, alpha, av3);
alpha_madde(y_local[tid][i], tmp, x[ar3]);
}
/* Remainder loop for the last (ail % 4) entries. */
for(; ai < ail; ai++)
{
ALPHA_Number av = A_val[ai];
ALPHA_INT ar = A_row[ai];
alpha_madde(y_local[tid][ar], av, alpha_xi);
alpha_mul_3c(tmp, alpha, av);
alpha_madde(y_local[tid][i], tmp, x[ar]);
}
}
/* Reduce the per-thread accumulators into y (parallel over rows, so no
 * two threads ever write the same y[col]). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT col = 0; col < m; col++)
for(ALPHA_INT i = 0; i < num_threads; i++)
{
alpha_add(y[col], y[col], y_local[i][col]);
}
for(ALPHA_INT i = 0; i < num_threads; i++)
{
alpha_free(y_local[i]);
}
alpha_free(y_local);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point (name generated by the ONAME macro): forwards to the
 * unrolled Hermitian CSC lower-triangle kernel above. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return hermv_csc_n_lo_unroll(alpha, A, x, beta, y);
}
|
boxloop_cuda.h | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the BoxLoop
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* BoxLoop macros:
*--------------------------------------------------------------------------*/
#ifndef HYPRE_BOXLOOP_CUDA_HEADER
#define HYPRE_BOXLOOP_CUDA_HEADER
#define HYPRE_LAMBDA [=] __host__ __device__
/* TODO: RL: support 4-D */
typedef struct hypre_Boxloop_struct
{
HYPRE_Int lsize0, lsize1, lsize2;
HYPRE_Int strides0, strides1, strides2;
HYPRE_Int bstart0, bstart1, bstart2;
HYPRE_Int bsize0, bsize1, bsize2;
} hypre_Boxloop;
#ifdef __cplusplus
extern "C++"
{
#endif
/* -------------------------
 * parfor-loop
 * ------------------------*/
/* Device kernel: each thread executes loop_body(idx) for its own global
 * 1-D thread id, guarded against the tail (idx >= length). */
template <typename LOOP_BODY>
__global__ void
forall_kernel( LOOP_BODY loop_body,
HYPRE_Int length )
{
const HYPRE_Int idx = hypre_cuda_get_grid_thread_id<1, 1>();
/* const HYPRE_Int number_threads = hypre_cuda_get_grid_num_threads<1,1>(); */
if (idx < length)
{
loop_body(idx);
}
}
/* Host dispatcher for a flat parallel-for of 'length' iterations.
 * Chooses host (OpenMP) or device (CUDA) execution from hypre's current
 * struct execution policy; one thread/iteration on the device. */
template<typename LOOP_BODY>
void
BoxLoopforall( HYPRE_Int length,
LOOP_BODY loop_body )
{
HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle());
if (exec_policy == HYPRE_EXEC_HOST)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (HYPRE_Int idx = 0; idx < length; idx++)
{
loop_body(idx);
}
}
else if (exec_policy == HYPRE_EXEC_DEVICE)
{
const dim3 bDim = hypre_GetDefaultCUDABlockDimension();
const dim3 gDim = hypre_GetDefaultCUDAGridDimension(length, "thread", bDim);
HYPRE_CUDA_LAUNCH( forall_kernel, gDim, bDim, loop_body, length );
}
}
/* ------------------------------
 * parforreduction-loop
 * -----------------------------*/
/* Device kernel for reductions: threads stride over [0, length) so the
 * grid may be smaller than 'length'; loop_body accumulates into the
 * per-thread reducer, which is then combined block-wise. */
template <typename LOOP_BODY, typename REDUCER>
__global__ void
reductionforall_kernel( HYPRE_Int length,
REDUCER reducer,
LOOP_BODY loop_body )
{
const HYPRE_Int thread_id = hypre_cuda_get_grid_thread_id<1, 1>();
const HYPRE_Int n_threads = hypre_cuda_get_grid_num_threads<1, 1>();
for (HYPRE_Int idx = thread_id; idx < length; idx += n_threads)
{
loop_body(idx, reducer);
}
/* reduction in block-level and the save the results in reducer */
reducer.BlockReduce();
}
/* Host dispatcher for a parallel-for with reduction.  On the host it is a
 * plain sequential loop (reducer accumulates in place); on the device the
 * grid is capped at 1024 blocks and the reducer records the block count
 * for its later final (cross-block) reduction.  No-op for length <= 0. */
template<typename LOOP_BODY, typename REDUCER>
void
ReductionBoxLoopforall( HYPRE_Int length,
REDUCER & reducer,
LOOP_BODY loop_body )
{
if (length <= 0)
{
return;
}
HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle());
if (exec_policy == HYPRE_EXEC_HOST)
{
for (HYPRE_Int idx = 0; idx < length; idx++)
{
loop_body(idx, reducer);
}
}
else if (exec_policy == HYPRE_EXEC_DEVICE)
{
const dim3 bDim = hypre_GetDefaultCUDABlockDimension();
dim3 gDim = hypre_GetDefaultCUDAGridDimension(length, "thread", bDim);
/* Note: we assume gDim cannot exceed 1024
 * and bDim < WARP * WARP
 */
gDim.x = hypre_min(gDim.x, 1024);
reducer.nblocks = gDim.x;
/*
hypre_printf("length= %d, blocksize = %d, gridsize = %d\n", length, bDim.x, gDim.x);
*/
HYPRE_CUDA_LAUNCH( reductionforall_kernel, gDim, bDim, length, reducer, loop_body );
}
}
#ifdef __cplusplus
}
#endif
/* Get 1-D length of the loop, in hypre__tot */
#define hypre_newBoxLoopInit(ndim, loop_size) \
HYPRE_Int hypre__tot = 1; \
for (HYPRE_Int hypre_d = 0; hypre_d < ndim; hypre_d ++) \
{ \
hypre__tot *= loop_size[hypre_d]; \
}
/* Initialize struct for box-k */
#define hypre_BoxLoopDataDeclareK(k, ndim, loop_size, dbox, start, stride) \
hypre_Boxloop databox##k; \
/* dim 0 */ \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = start[0] - dbox->imin[0]; \
databox##k.bsize0 = dbox->imax[0] - dbox->imin[0]; \
/* dim 1 */ \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = start[1] - dbox->imin[1]; \
databox##k.bsize1 = dbox->imax[1] - dbox->imin[1]; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
/* dim 2 */ \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = start[2] - dbox->imin[2]; \
databox##k.bsize2 = dbox->imax[2] - dbox->imin[2]; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
#define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \
hypre_Boxloop databox##k; \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = 0; \
databox##k.bsize0 = 0; \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
/* RL: TODO loop_size out of box struct, bsize +1 */
/* Given input 1-D 'idx' in box, get 3-D 'local_idx' in loop_size */
#define hypre_newBoxLoopDeclare(box) \
hypre_Index local_idx; \
HYPRE_Int idx_local = idx; \
hypre_IndexD(local_idx, 0) = idx_local % box.lsize0; \
idx_local = idx_local / box.lsize0; \
hypre_IndexD(local_idx, 1) = idx_local % box.lsize1; \
idx_local = idx_local / box.lsize1; \
hypre_IndexD(local_idx, 2) = idx_local % box.lsize2; \
/* Given input 3-D 'local_idx', get 1-D 'hypre__i' in 'box' */
#define hypre_BoxLoopIncK(k, box, hypre__i) \
HYPRE_Int hypre_boxD##k = 1; \
HYPRE_Int hypre__i = 0; \
hypre__i += (hypre_IndexD(local_idx, 0) * box.strides0 + box.bstart0) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize0 + 1); \
hypre__i += (hypre_IndexD(local_idx, 1) * box.strides1 + box.bstart1) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize1 + 1); \
hypre__i += (hypre_IndexD(local_idx, 2) * box.strides2 + box.bstart2) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize2 + 1);
/* get 3-D local_idx into 'index' */
#define hypre_BoxLoopGetIndex(index) \
index[0] = hypre_IndexD(local_idx, 0); \
index[1] = hypre_IndexD(local_idx, 1); \
index[2] = hypre_IndexD(local_idx, 2);
/* BoxLoop 0 */
#define hypre_newBoxLoop0Begin(ndim, loop_size) \
{ \
hypre_newBoxLoopInit(ndim, loop_size); \
BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
{
#define hypre_newBoxLoop0End() \
}); \
}
/* BoxLoop 1 */
#define hypre_newBoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, i1) \
{ \
hypre_newBoxLoopInit(ndim, loop_size); \
hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1, databox1, i1);
#define hypre_newBoxLoop1End(i1) \
}); \
}
/* BoxLoop 2 */
#define hypre_newBoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2) \
{ \
hypre_newBoxLoopInit(ndim, loop_size); \
hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \
BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1, databox1, i1); \
hypre_BoxLoopIncK(2, databox2, i2);
#define hypre_newBoxLoop2End(i1, i2) \
}); \
}
/* BoxLoop 3 */
#define hypre_newBoxLoop3Begin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3) \
{ \
hypre_newBoxLoopInit(ndim, loop_size); \
hypre_BoxLoopDataDeclareK(1, ndim,loop_size, dbox1, start1, stride1); \
hypre_BoxLoopDataDeclareK(2, ndim,loop_size, dbox2, start2, stride2); \
hypre_BoxLoopDataDeclareK(3, ndim,loop_size, dbox3, start3, stride3); \
BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1, databox1, i1); \
hypre_BoxLoopIncK(2, databox2, i2); \
hypre_BoxLoopIncK(3, databox3, i3);
#define hypre_newBoxLoop3End(i1, i2, i3) \
}); \
}
/* BoxLoop 4 */
#define hypre_newBoxLoop4Begin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3, \
dbox4, start4, stride4, i4) \
{ \
hypre_newBoxLoopInit(ndim, loop_size); \
hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \
hypre_BoxLoopDataDeclareK(3, ndim, loop_size, dbox3, start3, stride3); \
hypre_BoxLoopDataDeclareK(4, ndim, loop_size, dbox4, start4, stride4); \
BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1, databox1, i1); \
hypre_BoxLoopIncK(2, databox2, i2); \
hypre_BoxLoopIncK(3, databox3, i3); \
hypre_BoxLoopIncK(4, databox4, i4);
#define hypre_newBoxLoop4End(i1, i2, i3, i4) \
}); \
}
/* Basic BoxLoops have no boxes */
/* BoxLoop 1 */
#define zypre_newBasicBoxLoop1Begin(ndim, loop_size, stride1, i1) \
{ \
hypre_newBoxLoopInit(ndim, loop_size); \
zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \
BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1, databox1, i1);
/* BoxLoop 2 */
#define zypre_newBasicBoxLoop2Begin(ndim, loop_size, stride1, i1, stride2, i2) \
{ \
hypre_newBoxLoopInit(ndim, loop_size); \
zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \
zypre_BasicBoxLoopDataDeclareK(2, ndim, loop_size, stride2); \
BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1, databox1, i1); \
hypre_BoxLoopIncK(2, databox2, i2); \
/* TODO: RL just parallel-for, it should not be here, better in utilities */
#define hypre_LoopBegin(size, idx) \
{ \
BoxLoopforall(size, HYPRE_LAMBDA (HYPRE_Int idx) \
{
#define hypre_LoopEnd() \
}); \
}
/* Reduction BoxLoop1 */
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, reducesum) \
{ \
hypre_newBoxLoopInit(ndim, loop_size); \
hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1, databox1, i1);
#define hypre_BoxLoop1ReductionEnd(i1, reducesum) \
}); \
}
/* Reduction BoxLoop2 */
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, reducesum) \
{ \
hypre_newBoxLoopInit(ndim, loop_size); \
hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \
hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \
ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1, databox1, i1); \
hypre_BoxLoopIncK(2, databox2, i2);
#define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \
}); \
}
/* Renamings */
#define hypre_BoxLoopBlock() 0
#define hypre_BoxLoop0Begin hypre_newBoxLoop0Begin
#define hypre_BoxLoop0For hypre_newBoxLoop0For
#define hypre_BoxLoop0End hypre_newBoxLoop0End
#define hypre_BoxLoop1Begin hypre_newBoxLoop1Begin
#define hypre_BoxLoop1For hypre_newBoxLoop1For
#define hypre_BoxLoop1End hypre_newBoxLoop1End
#define hypre_BoxLoop2Begin hypre_newBoxLoop2Begin
#define hypre_BoxLoop2For hypre_newBoxLoop2For
#define hypre_BoxLoop2End hypre_newBoxLoop2End
#define hypre_BoxLoop3Begin hypre_newBoxLoop3Begin
#define hypre_BoxLoop3For hypre_newBoxLoop3For
#define hypre_BoxLoop3End hypre_newBoxLoop3End
#define hypre_BoxLoop4Begin hypre_newBoxLoop4Begin
#define hypre_BoxLoop4For hypre_newBoxLoop4For
#define hypre_BoxLoop4End hypre_newBoxLoop4End
#define hypre_BasicBoxLoop1Begin zypre_newBasicBoxLoop1Begin
#define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin
#endif /* #ifndef HYPRE_BOXLOOP_CUDA_HEADER */
|
cpu_ctc.h | #pragma once
#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>
#include <dmlc/omp.h>
#include "ctc_helper.h"
namespace mxnet_warpctc {
/* CPU implementation of CTC (Connectionist Temporal Classification) loss.
 * All scratch memory comes from a caller-provided workspace; activations
 * are laid out time-major and minibatch-interleaved:
 * activations[(mb + minibatch * t) * alphabet_size + char]. */
template<typename ProbT>
class CpuCTC {
public:
// Noncopyable
CpuCTC(int alphabet_size, int minibatch, void* workspace,
int blank_label) :
alphabet_size_(alphabet_size), minibatch_(minibatch),
workspace_(workspace), blank_label_(blank_label) {
};
CpuCTC(const CpuCTC&) = delete;
CpuCTC& operator=(const CpuCTC&) = delete;
/* Computes per-utterance CTC costs AND gradients w.r.t. the
 * (unnormalized) activations.  'grads' must be pre-zeroed (see
 * compute_betas_and_grad).  Returns CTC_STATUS_INVALID_VALUE on any
 * null argument. */
ctcStatus_t cost_and_grad(const ProbT* const activations,
ProbT *grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
/* Forward-only scoring: fills costs with the negative log-likelihoods,
 * no gradient computation. */
ctcStatus_t score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
private:
/* Per-utterance scratch views carved out of the shared workspace. */
class CpuCTC_metadata {
private:
int setup_labels(const int* const labels, int blank_label, int L, int S);
public:
CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
void* workspace, size_t bytes_used, int blank_label,
const int* const labels);
ProbT* alphas;          // forward probabilities, S x T (log space)
ProbT* betas;           // one column of backward probabilities, length S
int* labels_w_blanks;   // labels with blanks interleaved, length S = 2L+1
int* e_inc;             // per-step increments of the DP window end
int* s_inc;             // per-step increments of the DP window start
ProbT* output;          // per-label accumulator for the gradient
int repeats;            // number of adjacent equal-label pairs
};
int alphabet_size_; // Number of characters plus blank
int minibatch_;
void* workspace_;
int blank_label_;
/* Numerically stable log-softmax over the alphabet axis. */
void log_softmax(const ProbT* const activations, ProbT* log_probs,
const int* const input_lengths);
/* Cost and gradient for a single utterance; bool flags a large
 * forward/backward log-likelihood discrepancy. */
std::tuple<ProbT, bool>
cost_and_grad_kernel(ProbT *grad, const ProbT* const log_probs,
const int* const labels, int T, int L,
int mb, size_t bytes_used);
ProbT compute_alphas(const ProbT* log_probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas);
ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const log_probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output);
};
/* Carves the per-utterance buffers sequentially out of the workspace,
 * starting at byte offset 'bytes_used'.  The carving order and sizes here
 * must stay in sync with the per_minibatch_bytes accounting in
 * cost_and_grad / score_forward. */
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
int alphabet_size,
void* workspace, size_t bytes_used,
int blank_label,
const int* const labels) {
// alphas: S x T forward lattice, initialized to log(0)
alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S * T;
std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());
// betas: a single backward column of length S
betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S;
std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());
labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
output = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used) == nullptr ? nullptr : reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * alphabet_size;
// Interleave blanks into the label sequence and build the window tables.
repeats = setup_labels(labels, blank_label, L, S);
}
/* Builds labels_w_blanks = [blank, l0, blank, l1, ..., blank] and the
 * s_inc/e_inc stride tables that drive how the active DP window
 * [start, end) advances per timestep in compute_alphas /
 * compute_betas_and_grad: a repeated label forces two single steps
 * (the blank between repeats is mandatory), otherwise the window may
 * skip by two.  Returns the number of adjacent repeated labels. */
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
int blank_label, int L, int S) {
int e_counter = 0;
int s_counter = 0;
s_inc[s_counter++] = 1;
int repeats = 0;
for (int i = 1; i < L; ++i) {
if (labels[i-1] == labels[i]) {
// repeat: window start/end must advance one position at a time
s_inc[s_counter++] = 1;
s_inc[s_counter++] = 1;
e_inc[e_counter++] = 1;
e_inc[e_counter++] = 1;
++repeats;
}
else {
// distinct labels: the blank in between can be skipped
s_inc[s_counter++] = 2;
e_inc[e_counter++] = 2;
}
}
e_inc[e_counter++] = 1;
// interleave blanks: positions 0,2,4,... are blank, odd positions labels
for (int i = 0; i < L; ++i) {
labels_w_blanks[2 * i] = blank_label;
labels_w_blanks[2 * i + 1] = labels[i];
}
labels_w_blanks[S - 1] = blank_label;
return repeats;
}
/* Numerically stable log-softmax over the alphabet axis, one time column
 * at a time.  Columns are interleaved across the minibatch:
 * column (mb, t) starts at (mb + minibatch_ * t) * alphabet_size_. */
template<typename ProbT>
void
CpuCTC<ProbT>::log_softmax(const ProbT* const activations, ProbT* log_probs,
                           const int* const input_lengths) {
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb];
        for (int t = 0; t < T; ++t) {
            const int base = (mb + minibatch_ * t) * alphabet_size_;
            const ProbT* col = activations + base;
            ProbT* out = log_probs + base;
            // Subtract the column max before exponentiating for stability.
            ProbT col_max = -std::numeric_limits<ProbT>::infinity();
            for (int r = 0; r < alphabet_size_; ++r) {
                col_max = std::max(col_max, col[r]);
            }
            ProbT norm = ProbT(0.);
            for (int r = 0; r < alphabet_size_; ++r) {
                norm += std::exp(col[r] - col_max);
            }
            const ProbT log_norm = std::log(norm);
            for (int r = 0; r < alphabet_size_; ++r) {
                out[r] = col[r] - col_max - log_norm;
            }
        }
    }
}
/* Cost and gradient for one utterance: runs the forward (alpha) and
 * backward (beta + gradient) passes and returns {-log-likelihood,
 * over_threshold}, where over_threshold flags a forward/backward
 * log-likelihood mismatch beyond ctc_helper::threshold. */
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const log_probs,
const int* const labels,
int T, int L, int mb, size_t bytes_used) {
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, blank_label_, labels);
bool over_threshold = false;
// Infeasible: not enough timesteps to emit all labels (repeats need an
// extra blank each).
if (L + ctcm.repeats > T) {
return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
}
ProbT llForward = compute_alphas(log_probs, ctcm.repeats, S, T, ctcm.e_inc,
ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
ProbT llBackward = compute_betas_and_grad(grad, log_probs, llForward, ctcm.repeats,
S, T, ctcm.e_inc, ctcm.s_inc,
ctcm.labels_w_blanks,
ctcm.alphas,
ctcm.betas,
ctcm.output);
// Forward and backward log-likelihoods should agree; a large gap
// indicates numerical trouble.
ProbT diff = std::abs(llForward - llBackward);
if (diff > ctc_helper::threshold) {
over_threshold = true;
}
return std::make_tuple(-llForward, over_threshold);
}
// Computes forward probabilities
/* Standard CTC forward pass in log space over the (S, T) lattice.  Only
 * the feasible window [start, end) of label positions is visited at each
 * timestep; s_inc/e_inc (built in setup_labels) advance the window.
 * Returns the total log-likelihood (log-sum over the final column). */
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* log_probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas) {
// At t=0 only positions 0 (blank) and 1 (first label) can be occupied;
// start is pushed to 1 when there is no slack to begin with a blank.
int start = (((S /2) + repeats - T) < 0) ? 0 : 1,
end = S > 1 ? 2 : 1;
for (int i = start; i < end; ++i) {
alphas[i] = log_probs[labels[i]];
}
for(int t = 1; t < T; ++t) {
// Advance the feasible window for this timestep.
int remain = (S / 2) + repeats - (T - t);
if(remain >= 0)
start += s_inc[remain];
if(t <= (S / 2) + repeats)
end += e_inc[t - 1];
int startloop = start;
// idx1/idx2: rows t and t-1 of the alpha lattice; idx3: time offset
// into the minibatch-interleaved log_probs.
int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);
if (start == 0) {
// Position 0 (leading blank) has only a self-transition.
alphas[idx1] = alphas[idx2] + log_probs[blank_label_ + idx3];
startloop += 1;
}
for(int i = startloop; i < end; ++i) {
ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != 1 && labels[i] != labels[i-2])
prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);
alphas[i + idx1] = prev_sum + log_probs[labels[i] + idx3];
}
}
// Log-sum the surviving end states of the final column.
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
}
return loglike;
}
// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const log_probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output) {
// Feasible window for the final column: last blank and last label.
int start = S > 1 ? (S - 2) : 0,
end = (T > (S / 2) + repeats) ? S : S-1;
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
//set the starting values in the beta column at the very right edge
for (int i = start; i < end; ++i) {
betas[i] = log_probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)];
//compute alpha * beta in log space at this position in (S, T) space
alphas[i + (T - 1) * S] += betas[i];
//update the gradient associated with this label
//essentially performing a reduce-by-key in a sequential manner
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
}
//update the gradient wrt to each unique label
for (int i = 0; i < alphabet_size_; ++i) {
int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;
// grad = softmax(activation) - P(label | input) per character; the
// first branch covers characters that never occur at this timestep.
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
log_probs[idx3] == ctc_helper::neg_inf<ProbT>()) {
grad[idx3] = std::exp(log_probs[idx3]);
} else {
grad[idx3] = std::exp(log_probs[idx3])
- std::exp(output[i] - log_probs[idx3] - log_partition);
}
}
//loop from the second to last column all the way to the left
for(int t = T - 2; t >= 0; --t) {
// Retract the feasible window (mirror of the forward pass).
int remain = (S / 2) + repeats - (T - t);
if(remain >= -1)
start -= s_inc[remain + 1];
if(t < (S / 2) + repeats)
end -= e_inc[t];
int endloop = end == S ? end - 1 : end;
int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
for(int i = start; i < endloop; ++i) {
// betas is updated in place: betas[i] on the right-hand side still
// holds column t+1 (positions >= i are not yet overwritten).
ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != (S-2) && labels[i] != labels[i+2]){
next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
}
betas[i] = next_sum + log_probs[labels[i] + idx3];
//compute alpha * beta in log space
alphas[i + idx1] += betas[i];
//update the gradient associated with this label
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
}
if (end == S) {
// Final blank position: self-transition only.
betas[(S-1)] = betas[(S-1)] + log_probs[blank_label_ + idx3];
alphas[(S-1) + idx1] += betas[(S-1)];
output[labels[S-1]] =
ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
}
//go over the unique labels and compute the final grad
// wrt to each one at this time step
for (int i = 0; i < alphabet_size_; ++i) {
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
log_probs[idx3] == ctc_helper::neg_inf<ProbT>()) {
grad[idx3] = std::exp(log_probs[idx3]);
} else {
grad[idx3] = std::exp(log_probs[idx3])
- std::exp(output[i] - log_probs[idx3] - log_partition);
}
++idx3;
}
}
// Backward log-likelihood: log-sum over the feasible start states.
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
}
return loglike;
}
/* Computes per-utterance CTC costs and gradients w.r.t. the unnormalized
 * activations.  'grads' must be pre-zeroed.  Utterances are processed in
 * parallel; each gets its own slice of the shared workspace. */
template<typename ProbT>
ctcStatus_t
CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
                             ProbT *grads,
                             ProbT *costs,
                             const int* const flat_labels,
                             const int* const label_lengths,
                             const int* const input_lengths) {
    if (activations == nullptr ||
        grads == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
       )
        return CTC_STATUS_INVALID_VALUE;
    // The head of the workspace holds the log-softmaxed activations.
    ProbT* log_probs = static_cast<ProbT *>(workspace_);
    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
    // Per-minibatch scratch accounting; the sizes and order must mirror
    // the buffers carved out in CpuCTC_metadata.
    // BUG FIX: these were computed with sizeof(float), but CpuCTC_metadata
    // carves sizeof(ProbT) buffers — when ProbT is double the per-utterance
    // regions overlapped, corrupting neighboring minibatch scratch.
    size_t per_minibatch_bytes = 0;
    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;
    // output
    per_minibatch_bytes += sizeof(ProbT) * alphabet_size_;
    // alphas
    per_minibatch_bytes += sizeof(ProbT) * maxS * maxT;
    // betas
    per_minibatch_bytes += sizeof(ProbT) * maxS;
    // labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;
    log_softmax(activations, log_probs, input_lengths);
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        bool mb_status;
        // log_probs/grads are minibatch-interleaved, so the base offset
        // for utterance mb is mb * alphabet_size_.
        std::tie(costs[mb], mb_status) =
            cost_and_grad_kernel(grads + mb * alphabet_size_,
                                 log_probs + mb * alphabet_size_,
                                 flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
                                 T, L, mb,
                                 bytes_used + mb * per_minibatch_bytes);
    }
    return CTC_STATUS_SUCCESS;
}
// Forward pass only: fills costs[mb] with the negative log-likelihood of
// each utterance.  No gradients are produced.  Mirrors cost_and_grad()'s
// workspace layout exactly.
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
// Validate all required buffers before touching the workspace.
if (activations == nullptr ||
costs == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr
)
return CTC_STATUS_INVALID_VALUE;
// Softmax output occupies the front of the workspace.
ProbT* log_probs = static_cast<ProbT *>(workspace_);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
//per minibatch memory
size_t per_minibatch_bytes = 0;
int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
int maxS = 2 * maxL + 1;
// NOTE(review): sizes use sizeof(float), not sizeof(ProbT); this must match
// the workspace-size computation done at allocation time -- confirm before
// changing.
//output
per_minibatch_bytes += sizeof(float) * alphabet_size_;
//alphas
per_minibatch_bytes += sizeof(float) * maxS * maxT;
//betas
per_minibatch_bytes += sizeof(float) * maxS;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * maxS;
log_softmax(activations, log_probs, input_lengths);
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int L = label_lengths[mb]; // Number of labels in transcription
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_,
bytes_used + mb * per_minibatch_bytes, blank_label_,
flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));
// Infeasible when the label sequence plus the blanks required between
// repeated labels cannot fit into the available time steps.
if (L + ctcm.repeats > T)
costs[mb] = ProbT(0);
else {
costs[mb] = -compute_alphas(log_probs + mb * alphabet_size_, ctcm.repeats, S, T,
ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
}
}
return CTC_STATUS_SUCCESS;
}
} // mxnet_warpctc
|
pr67500.c | /* PR c/67500 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* Each 'declare simd' below supplies an invalid simdlen() argument; the
   dg-error comments are directives checked by the DejaGnu harness.  They are
   line-relative (.-1 points at the line above), so no lines may be inserted
   between a pragma and the declaration that follows it. */
#pragma omp declare simd simdlen(d) /* { dg-error "clause expression must be positive constant integer expression" } */
void f1 (int); /* { dg-error "undeclared here" "" { target *-*-* } .-1 } */
#pragma omp declare simd simdlen(0.5) /* { dg-error "clause expression must be positive constant integer expression" } */
void f2 (int);
#pragma omp declare simd simdlen(-2) /* { dg-error "clause expression must be positive constant integer expression" } */
void f3 (int);
#pragma omp declare simd simdlen(0) /* { dg-error "clause expression must be positive constant integer expression" } */
void f4 (int);
/* Exercises invalid safelen() and aligned() arguments (undeclared name,
   non-integer, negative, zero).  Every dg-error directive sits on its own
   line or refers to the pragma directly above it via .-1, so no lines may be
   inserted between a pragma and the statement that follows it. */
void
foo (int *p)
{
int i;
#pragma omp simd safelen(d) /* { dg-error "must be positive constant integer expression" } */
for (i = 0; i < 16; ++i) /* { dg-error "undeclared" "" { target *-*-* } .-1 } */
;
#pragma omp simd safelen(0.5) /* { dg-error "must be positive constant integer expression" } */
for (i = 0; i < 16; ++i)
;
#pragma omp simd safelen(-2) /* { dg-error "must be positive constant integer expression" } */
for (i = 0; i < 16; ++i)
;
#pragma omp simd safelen(0) /* { dg-error "must be positive constant integer expression" } */
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(p:d) /* { dg-error "must be positive constant integer expression" } */
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(p:0.5) /* { dg-error "must be positive constant integer expression" } */
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(p:-2) /* { dg-error "must be positive constant integer expression" } */
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(p:0) /* { dg-error "must be positive constant integer expression" } */
for (i = 0; i < 16; ++i)
;
}
|
axpy_practice.c | /*
* AXPY Y[N] = Y[N] + a*X[N]
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>
#include <omp.h>
/* read timer in second */
/* Wall-clock time as seconds.fraction, built from ftime()'s seconds and
   milliseconds fields.  NOTE(review): ftime() is obsolescent in POSIX;
   clock_gettime(CLOCK_MONOTONIC) would be the modern replacement --
   confirm portability requirements before switching. */
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
/* read timer in ms */
/* Same clock as read_timer(), but expressed in milliseconds. */
double read_timer_ms() {
struct timeb tm;
ftime(&tm);
return (double) tm.time * 1000.0 + (double) tm.millitm;
}
#define REAL float
#define VECTOR_LENGTH 102400
/* initialize a vector with random floating point numbers */
/* Fills A[0..N-1] with drand48() samples (uniform in [0, 1)). */
#ifndef REAL
#define REAL float
#endif
void init(REAL A[], int N) {
    for (int idx = 0; idx < N; idx++)
        A[idx] = (REAL) drand48();
}
/* Returns the sum of absolute element-wise differences between A and B.
   A result of 0.0 means the two vectors are identical. */
#ifndef REAL
#define REAL float
#endif
double check(REAL A[], REAL B[], int N) {
    double total = 0.0;
    for (int k = 0; k < N; k++)
        total += fabs(A[k] - B[k]);
    return total;
}
void axpy_base(int N, REAL Y[], REAL X[], REAL a);
void axpy_omp_parallel(int N, REAL Y[], REAL X[], REAL a);
int main(int argc, char *argv[]) {
int N = VECTOR_LENGTH;
double elapsed; /* for timing */
double elapsed_omp; /* for timing */
if (argc < 2) {
fprintf(stderr, "Usage: axpy <n> \n");
exit(1);
}
N = atoi(argv[1]);
REAL a = 123.456;
REAL Y_base[N];
REAL Y_omp[N];
REAL X[N];
srand48((1 << 12));
init(X, N);
init(Y_base, N);
memcpy(Y_omp, Y_base, N * sizeof(REAL));
/* example run */
elapsed = read_timer();
axpy_base(N, Y_base, X, a);
elapsed = (read_timer() - elapsed);
elapsed_omp = read_timer();
axpy_omp_parallel(N, Y_omp, X, a);
elapsed_omp = (read_timer() - elapsed_omp);
/* you should add the call to each function and time the execution */
printf("\tAXPY: Y[N] = Y[N] + a*X[N], N=%d\n", N);
printf("-----------------------------------------------------------\n");
printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
printf("-----------------------------------------------------------\n");
printf("axpy_base:\t\t%4f\t%4f \t\t%g\n", elapsed * 1.0e3, (2.0 * N) / (1.0e6 * elapsed), check(Y_base, Y_base, N));
printf("axpy_omp:\t\t%4f\t%4f \t\t%g\n", elapsed_omp * 1.0e3, (2.0 * N) / (1.0e6 * elapsed_omp), check(Y_base, Y_omp, N));
return 0;
}
/* Serial reference implementation: Y[k] = Y[k] + a * X[k] for k in [0, N). */
#ifndef REAL
#define REAL float
#endif
void axpy_base(int N, REAL Y[], REAL X[], REAL a) {
    for (int k = 0; k < N; ++k)
        Y[k] += a * X[k];
}
#define USE_CONSTRUCT
/* use openMP */
#ifndef REAL
#define REAL float
#endif
/* OpenMP AXPY: Y[k] += a * X[k], with iterations divided among threads. */
void axpy_omp_parallel(int N, REAL Y[], REAL X[], REAL a)
{
#ifdef USE_CONSTRUCT
    /* Work-sharing form: combined parallel-for; the loop index declared in
       the for statement is automatically private to each thread. */
    #pragma omp parallel for
    for (int k = 0; k < N; k++)
        Y[k] += a * X[k];
#else
    /* Manual decomposition: each thread computes its own [istart, iend)
       slice of the vector.  Kept for reference; not compiled. */
    int Nthrds;
    #pragma omp parallel shared(X, Y)
    {
        int id, i, istart, iend;
        /* The single construct's implicit barrier publishes Nthrds to all
           threads before it is read below. */
        #pragma omp single
        Nthrds = omp_get_num_threads();
        id = omp_get_thread_num();
        istart = id * N / Nthrds;
        iend = (id + 1) * N / Nthrds;
        for (i = istart; i < iend; i++)
            Y[i] += a * X[i];
    }
#endif
}
|
GB_unop__acos_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acos_fp64_fp64)
// op(A') function: GB (_unop_tran__acos_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = acos (aij)
// A (input) matrix entry type
#define GB_ATYPE \
double

// C (output) matrix entry type
#define GB_CTYPE \
double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]

// access the pC-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
z = acos (x) ;

// casting
#define GB_CAST(z, aij) \
double z = aij ;

// cij = op (aij): fused load + cast + apply, used by the transpose kernel
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = acos (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__acos_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = acos (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = acos (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__acos_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The transpose kernel body is shared by all unary operators; it is
// specialized for acos/fp64 via the GB_* macros defined above.
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/feature.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _CannyInfo
{
double
magnitude,   /* gradient magnitude at this pixel */
intensity;   /* magnitude surviving non-maxima suppression (0 if suppressed) */
int
orientation; /* quantized gradient direction: 0=N/S, 1=45deg, 2=E/W, 3=135deg */
ssize_t
x,
y;           /* pixel coordinates; used when matrix entries serve as a work queue */
} CannyInfo;
/* Returns MagickTrue when (x,y) lies within the image bounds. */
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x >= 0) && (x < (ssize_t) image->columns) &&
      (y >= 0) && (y < (ssize_t) image->rows))
    return(MagickTrue);
  return(MagickFalse);
}
/* Flood-follows an edge starting at (x,y): marks the pixel as edge
   (QuantumRange) and then repeatedly visits 8-connected neighbors whose
   suppressed gradient intensity is at least lower_threshold, marking each.
   Row 0 of canny_cache is reused as an explicit work stack of CannyInfo
   entries (only the x/y fields matter there); i is the stack depth.
   Returns MagickFalse on any cache/matrix failure, MagickTrue otherwise.
   NOTE(review): there is no visible bound check on i against the matrix
   width -- presumably the matrix is wide enough in practice; confirm. */
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
const double lower_threshold,ExceptionInfo *exception)
{
CannyInfo
edge,
pixel;
MagickBooleanType
status;
register Quantum
*q;
register ssize_t
i;
/* Mark the seed pixel as an edge. */
q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
*q=QuantumRange;
status=SyncCacheViewAuthenticPixels(edge_view,exception);
if (status == MagickFalse)
return(MagickFalse);
/* Push the seed coordinates onto the work stack (slot 0). */
if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
return(MagickFalse);
edge.x=x;
edge.y=y;
if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
return(MagickFalse);
/* Process the stack until it drains. */
for (i=1; i != 0; )
{
ssize_t
v;
i--;
status=GetMatrixElement(canny_cache,i,0,&edge);
if (status == MagickFalse)
return(MagickFalse);
for (v=(-1); v <= 1; v++)
{
ssize_t
u;
for (u=(-1); u <= 1; u++)
{
if ((u == 0) && (v == 0))
continue;
if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
continue;
/*
Not an edge if gradient value is below the lower threshold.
*/
q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
if (status == MagickFalse)
return(MagickFalse);
/* Unvisited (intensity 0 in the image) and strong enough: mark it
   and push it for further tracing. */
if ((GetPixelIntensity(edge_image,q) == 0.0) &&
(pixel.intensity >= lower_threshold))
{
*q=QuantumRange;
status=SyncCacheViewAuthenticPixels(edge_view,exception);
if (status == MagickFalse)
return(MagickFalse);
edge.x+=u;
edge.y+=v;
status=SetMatrixElement(canny_cache,i,0,&edge);
if (status == MagickFalse)
return(MagickFalse);
i++;
}
}
}
}
return(MagickTrue);
}
/* Canny edge detector: (1) Gaussian blur, (2) 2x2 gradient with quantized
   orientation, (3) non-maxima suppression, (4) hysteresis thresholding with
   edge tracing.  Returns a new grayscale edge image, or NULL on failure. */
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
const double sigma,const double lower_percent,const double upper_percent,
ExceptionInfo *exception)
{
#define CannyEdgeImageTag "CannyEdge/Image"
CacheView
*edge_view;
CannyInfo
element;
char
geometry[MagickPathExtent];
double
lower_threshold,
max,
min,
upper_threshold;
Image
*edge_image;
KernelInfo
*kernel_info;
MagickBooleanType
status;
MagickOffsetType
progress;
MatrixInfo
*canny_cache;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Filter out noise.
*/
/* Two separable 1-D blur kernels (0 and 90 degrees) approximate a 2-D
   Gaussian smoothing pass. */
(void) FormatLocaleString(geometry,MagickPathExtent,
"blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
kernel_info=AcquireKernelInfo(geometry,exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
if (edge_image == (Image *) NULL)
return((Image *) NULL);
if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse)
{
edge_image=DestroyImage(edge_image);
return((Image *) NULL);
}
(void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
/*
Find the intensity gradient of the image.
*/
canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
sizeof(CannyInfo),exception);
if (canny_cache == (MatrixInfo *) NULL)
{
edge_image=DestroyImage(edge_image);
return((Image *) NULL);
}
status=MagickTrue;
edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
/* Each row reads a 2-row window so the 2x2 kernels can look one pixel
   right and one pixel down. */
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
CannyInfo
pixel;
double
dx,
dy;
register const Quantum
*magick_restrict kernel_pixels;
ssize_t
v;
/* 2x2 difference kernels (Roberts-style) for the x and y gradients. */
static double
Gx[2][2] =
{
{ -1.0, +1.0 },
{ -1.0, +1.0 }
},
Gy[2][2] =
{
{ +1.0, +1.0 },
{ -1.0, -1.0 }
};
(void) memset(&pixel,0,sizeof(pixel));
dx=0.0;
dy=0.0;
kernel_pixels=p;
for (v=0; v < 2; v++)
{
ssize_t
u;
for (u=0; u < 2; u++)
{
double
intensity;
intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
dx+=0.5*Gx[v][u]*intensity;
dy+=0.5*Gy[v][u]*intensity;
}
kernel_pixels+=edge_image->columns+1;
}
pixel.magnitude=hypot(dx,dy);
pixel.orientation=0;
/* Quantize the gradient direction into 4 bins; the constants are
   tan(67.5deg) ~= 2.414 and tan(22.5deg) ~= 0.414. */
if (fabs(dx) > MagickEpsilon)
{
double
slope;
slope=dy/dx;
if (slope < 0.0)
{
if (slope < -2.41421356237)
pixel.orientation=0;
else
if (slope < -0.414213562373)
pixel.orientation=1;
else
pixel.orientation=2;
}
else
{
if (slope > 2.41421356237)
pixel.orientation=0;
else
if (slope > 0.414213562373)
pixel.orientation=3;
else
pixel.orientation=2;
}
}
if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
continue;
p+=GetPixelChannels(edge_image);
}
}
edge_view=DestroyCacheView(edge_view);
/*
Non-maxima suppression, remove pixels that are not considered to be part
of an edge.
*/
progress=0;
/* Seed min/max from entry (0,0); the loop below updates them under a
   critical section while suppressing non-maximal pixels. */
(void) GetMatrixElement(canny_cache,0,0,&element);
max=element.intensity;
min=element.intensity;
edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
CannyInfo
alpha_pixel,
beta_pixel,
pixel;
/* Compare each pixel against its two neighbors along the gradient
   direction; keep it only if it is the local maximum. */
(void) GetMatrixElement(canny_cache,x,y,&pixel);
switch (pixel.orientation)
{
case 0:
default:
{
/*
0 degrees, north and south.
*/
(void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
(void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
break;
}
case 1:
{
/*
45 degrees, northwest and southeast.
*/
(void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
(void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
break;
}
case 2:
{
/*
90 degrees, east and west.
*/
(void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
(void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
break;
}
case 3:
{
/*
135 degrees, northeast and southwest.
*/
(void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
(void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
break;
}
}
pixel.intensity=pixel.magnitude;
if ((pixel.magnitude < alpha_pixel.magnitude) ||
(pixel.magnitude < beta_pixel.magnitude))
pixel.intensity=0;
(void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CannyEdgeImage)
#endif
{
if (pixel.intensity < min)
min=pixel.intensity;
if (pixel.intensity > max)
max=pixel.intensity;
}
*q=0;
q+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
status=MagickFalse;
}
edge_view=DestroyCacheView(edge_view);
/*
Estimate hysteresis threshold.
*/
lower_threshold=lower_percent*(max-min)+min;
upper_threshold=upper_percent*(max-min)+min;
/*
Hysteresis threshold.
*/
/* Serial pass: TraceEdges() mutates shared state, so no parallel-for here.
   NOTE(review): the omp atomic on progress below appears vestigial in this
   serial loop -- confirm before removing. */
edge_view=AcquireAuthenticCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
register ssize_t
x;
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
CannyInfo
pixel;
register const Quantum
*magick_restrict p;
/*
Edge if pixel gradient higher than upper threshold.
*/
p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
if (p == (const Quantum *) NULL)
continue;
status=GetMatrixElement(canny_cache,x,y,&pixel);
if (status == MagickFalse)
continue;
if ((GetPixelIntensity(edge_image,p) == 0.0) &&
(pixel.intensity >= upper_threshold))
status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
exception);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
edge_view=DestroyCacheView(edge_view);
/*
Free resources.
*/
canny_cache=DestroyMatrixInfo(canny_cache);
return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference variance,
% difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageFeatures(image,1,exception);
% contrast=channel_features[RedPixelChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageFeatures method is:
%
% ChannelFeatures *GetImageFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelFeatures *GetImageFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
PixelInfo
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
PixelPacket
gray,
*grays;
MagickBooleanType
status;
register ssize_t
i,
r;
size_t
length;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=MaxPixelChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (PixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].alpha=(~0U);
grays[i].black=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(image,p))].red=
ScaleQuantumToMap(GetPixelRed(image,p));
grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green=
ScaleQuantumToMap(GetPixelGreen(image,p));
grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue=
ScaleQuantumToMap(GetPixelBlue(image,p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black=
ScaleQuantumToMap(GetPixelBlack(image,p));
if (image->alpha_trait != UndefinedPixelTrait)
grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha=
ScaleQuantumToMap(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) memset(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].black != ~0U)
grays[gray.black++].black=grays[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
if (grays[i].alpha != ~0U)
grays[gray.alpha++].alpha=grays[i].alpha;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.black > number_grays)
number_grays=gray.black;
if (image->alpha_trait != UndefinedPixelTrait)
if (gray.alpha > number_grays)
number_grays=gray.alpha;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) memset(&correlation,0,sizeof(correlation));
(void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) memset(&mean,0,sizeof(mean));
(void) memset(sum,0,number_grays*sizeof(*sum));
(void) memset(&sum_squares,0,sizeof(sum_squares));
(void) memset(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) memset(&entropy_x,0,sizeof(entropy_x));
(void) memset(&entropy_xy,0,sizeof(entropy_xy));
(void) memset(&entropy_xy1,0,sizeof(entropy_xy1));
(void) memset(&entropy_xy2,0,sizeof(entropy_xy2));
(void) memset(&entropy_y,0,sizeof(entropy_y));
(void) memset(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) memset(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) memset(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
ssize_t
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+
2*distance,distance+2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=distance*GetPixelChannels(image);;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p)))
u++;
while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p)))
u++;
while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].black++;
cooccurrence[v][u].direction[i].black++;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
u=0;
v=0;
while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p)))
u++;
while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].alpha++;
cooccurrence[v][u].direction[i].alpha++;
}
}
p+=GetPixelChannels(image);
}
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
register ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].black*=normalize;
if (image->alpha_trait != UndefinedPixelTrait)
cooccurrence[x][y].direction[i].alpha*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BluePixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].black*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].alpha*
cooccurrence[x][y].direction[i].alpha;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].black+=x*y*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
correlation.direction[i].alpha+=x*y*
cooccurrence[x][y].direction[i].alpha;
/*
Inverse Difference Moment.
*/
channel_features[RedPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BluePixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[y+x+2].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Entropy.
*/
channel_features[RedPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BluePixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].alpha*
MagickLog10(cooccurrence[x][y].direction[i].alpha);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->alpha_trait != UndefinedPixelTrait)
density_x[x].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].black+=
cooccurrence[x][y].direction[i].black;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_y[y].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].black+=y*sum[y].direction[i].black;
sum_squares.direction[i].black+=y*y*sum[y].direction[i].black;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
mean.direction[i].alpha+=y*sum[y].direction[i].alpha;
sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedPixelChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenPixelChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BluePixelChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].correlation[i]=
(correlation.direction[i].black-mean.direction[i].black*
mean.direction[i].black)/(sqrt(sum_squares.direction[i].black-
(mean.direction[i].black*mean.direction[i].black))*sqrt(
sum_squares.direction[i].black-(mean.direction[i].black*
mean.direction[i].black)));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].correlation[i]=
(correlation.direction[i].alpha-mean.direction[i].alpha*
mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha-
(mean.direction[i].alpha*mean.direction[i].alpha))*sqrt(
sum_squares.direction[i].alpha-(mean.direction[i].alpha*
mean.direction[i].alpha)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].alpha;
/*
Sum entropy.
*/
channel_features[RedPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Sum variance.
*/
channel_features[RedPixelChannel].sum_variance[i]+=
(x-channel_features[RedPixelChannel].sum_entropy[i])*
(x-channel_features[RedPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_variance[i]+=
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_variance[i]+=
(x-channel_features[BluePixelChannel].sum_entropy[i])*
(x-channel_features[BluePixelChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_variance[i]+=
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_variance[i]+=
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].alpha;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=(y-mean.direction[i].black+1)*
(y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)*
(y-mean.direction[i].alpha+1)*
cooccurrence[x][y].direction[i].alpha;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy.direction[i].alpha-=
cooccurrence[x][y].direction[i].alpha*MagickLog10(
cooccurrence[x][y].direction[i].alpha);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].black-=(
cooccurrence[x][y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy1.direction[i].alpha-=(
cooccurrence[x][y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].black-=(density_x[x].direction[i].black*
density_y[y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha*
density_y[y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
}
}
channel_features[RedPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BluePixelChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].alpha;
}
/*
Compute more texture features.
*/
(void) memset(&variance,0,sizeof(variance));
(void) memset(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=density_xy[x].direction[i].alpha;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].black+=density_xy[x].direction[i].black*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha*
density_xy[x].direction[i].alpha;
/*
Difference entropy.
*/
channel_features[RedPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].black-=(density_x[x].direction[i].black*
MagickLog10(density_x[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha*
MagickLog10(density_x[x].direction[i].alpha));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].black-=(density_y[x].direction[i].black*
MagickLog10(density_y[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha*
MagickLog10(density_y[x].direction[i].alpha));
}
/*
Difference variance.
*/
channel_features[RedPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BluePixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].black)-
(variance.direction[i].black*variance.direction[i].black))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].alpha)-
(variance.direction[i].alpha*variance.direction[i].alpha))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BluePixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/
(entropy_x.direction[i].black > entropy_y.direction[i].black ?
entropy_x.direction[i].black : entropy_y.direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/
(entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ?
entropy_x.direction[i].alpha : entropy_y.direction[i].alpha);
channel_features[RedPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BluePixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black-
entropy_xy.direction[i].black)))));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha-
entropy_xy.direction[i].alpha)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
register ssize_t
y;
ChannelStatistics
pixel;
(void) memset(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
pixel.direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
*/
if ((fabs(density_x[z].direction[i].red) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].green) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].blue) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].blue) > MagickEpsilon))
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/
density_x[z].direction[i].blue/density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
if ((fabs(density_x[z].direction[i].black) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].black) > MagickEpsilon))
Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black*
cooccurrence[y][x].direction[i].black/
density_x[z].direction[i].black/density_y[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
if ((fabs(density_x[z].direction[i].alpha) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].alpha) > MagickEpsilon))
Q[z][y].direction[i].alpha+=
cooccurrence[z][x].direction[i].alpha*
cooccurrence[y][x].direction[i].alpha/
density_x[z].direction[i].alpha/
density_y[x].direction[i].alpha;
}
}
channel_features[RedPixelChannel].contrast[i]+=z*z*
pixel.direction[i].red;
channel_features[GreenPixelChannel].contrast[i]+=z*z*
pixel.direction[i].green;
channel_features[BluePixelChannel].contrast[i]+=z*z*
pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].contrast[i]+=z*z*
pixel.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].contrast[i]+=z*z*
pixel.direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BluePixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommend Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator
% matrix of angle vs distance. The size of the accumulator is 180x(diagonal/2).
% Next it searches this space for peaks in counts and converts the locations
% of the peaks to slope and intercept in the normal x,y input image space. Use
% the slope/intercepts to find the endpoints clipped to the bounds of the
% image. The lines are then drawn. The counts are a measure of the length of
% the lines.
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox  "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.

    Render the drawing primitives referenced by image_info onto a fresh
    columns x rows canvas and return the resulting image; returns NULL on
    failure with details reported through exception.
  */
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  /*
    Scale the drawing by the image resolution relative to the default
    resolution (zero resolution means "no scaling"), then enlarge the
    canvas to match so the line endpoints land on the scaled canvas.
  */
  draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/
    DefaultResolution;
  draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing.  The primitive script comes either from the file named
    by the blob (not memory-resident) or from a NUL-terminated copy of the
    in-memory blob; draw_info owns the string and frees it below.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      /* +1 for the terminating NUL appended after the raw blob bytes */
      draw_info->primitive=(char *) AcquireMagickMemory((size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) memcpy(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
    }
  /* NOTE(review): DrawImage tolerates a NULL primitive on allocation
     failure above — confirm before relying on it */
  (void) DrawImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag "HoughLine/Image"
CacheView
*image_view;
char
message[MagickPathExtent],
path[MagickPathExtent];
const char
*artifact;
double
hough_height;
Image
*lines_image = NULL;
ImageInfo
*image_info;
int
file;
MagickBooleanType
status;
MagickOffsetType
progress;
MatrixInfo
*accumulator;
PointInfo
center;
register ssize_t
y;
size_t
accumulator_height,
accumulator_width,
line_count;
/*
Create the accumulator.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
accumulator_width=180;
hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
image->rows : image->columns))/2.0);
accumulator_height=(size_t) (2.0*hough_height);
accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
sizeof(double),exception);
if (accumulator == (MatrixInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
if (NullMatrix(accumulator) == MagickFalse)
{
accumulator=DestroyMatrixInfo(accumulator);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Populate the accumulator.
*/
status=MagickTrue;
progress=0;
center.x=(double) image->columns/2.0;
center.y=(double) image->rows/2.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
{
register ssize_t
i;
for (i=0; i < 180; i++)
{
double
count,
radius;
radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
(((double) y-center.y)*sin(DegreesToRadians((double) i)));
(void) GetMatrixElement(accumulator,i,(ssize_t)
MagickRound(radius+hough_height),&count);
count++;
(void) SetMatrixElement(accumulator,i,(ssize_t)
MagickRound(radius+hough_height),&count);
}
}
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
accumulator=DestroyMatrixInfo(accumulator);
return((Image *) NULL);
}
/*
Generate line segments from accumulator.
*/
file=AcquireUniqueFileResource(path);
if (file == -1)
{
accumulator=DestroyMatrixInfo(accumulator);
return((Image *) NULL);
}
(void) FormatLocaleString(message,MagickPathExtent,
"# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
(double) height,(double) threshold);
if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
status=MagickFalse;
(void) FormatLocaleString(message,MagickPathExtent,
"viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows);
if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
status=MagickFalse;
(void) FormatLocaleString(message,MagickPathExtent,
"# x1,y1 x2,y2 # count angle distance\n");
if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
status=MagickFalse;
line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
if (threshold != 0)
line_count=threshold;
for (y=0; y < (ssize_t) accumulator_height; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) accumulator_width; x++)
{
double
count;
(void) GetMatrixElement(accumulator,x,y,&count);
if (count >= (double) line_count)
{
double
maxima;
SegmentInfo
line;
ssize_t
v;
/*
Is point a local maxima?
*/
maxima=count;
for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
{
ssize_t
u;
for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
{
if ((u != 0) || (v !=0))
{
(void) GetMatrixElement(accumulator,x+u,y+v,&count);
if (count > maxima)
{
maxima=count;
break;
}
}
}
if (u < (ssize_t) (width/2))
break;
}
(void) GetMatrixElement(accumulator,x,y,&count);
if (maxima > count)
continue;
if ((x >= 45) && (x <= 135))
{
/*
y = (r-x cos(t))/sin(t)
*/
line.x1=0.0;
line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
(image->columns/2.0))*cos(DegreesToRadians((double) x))))/
sin(DegreesToRadians((double) x))+(image->rows/2.0);
line.x2=(double) image->columns;
line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
(image->columns/2.0))*cos(DegreesToRadians((double) x))))/
sin(DegreesToRadians((double) x))+(image->rows/2.0);
}
else
{
/*
x = (r-y cos(t))/sin(t)
*/
line.y1=0.0;
line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
(image->rows/2.0))*sin(DegreesToRadians((double) x))))/
cos(DegreesToRadians((double) x))+(image->columns/2.0);
line.y2=(double) image->rows;
line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
(image->rows/2.0))*sin(DegreesToRadians((double) x))))/
cos(DegreesToRadians((double) x))+(image->columns/2.0);
}
(void) FormatLocaleString(message,MagickPathExtent,
"line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2,
maxima,(double) x,(double) y);
if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
status=MagickFalse;
}
}
}
(void) close(file);
/*
Render lines to image canvas.
*/
image_info=AcquireImageInfo();
image_info->background_color=image->background_color;
(void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path);
artifact=GetImageArtifact(image,"background");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"background",artifact);
artifact=GetImageArtifact(image,"fill");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"fill",artifact);
artifact=GetImageArtifact(image,"stroke");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"stroke",artifact);
artifact=GetImageArtifact(image,"strokewidth");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"strokewidth",artifact);
lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
artifact=GetImageArtifact(image,"hough-lines:accumulator");
if ((lines_image != (Image *) NULL) &&
(IsStringTrue(artifact) != MagickFalse))
{
Image
*accumulator_image;
accumulator_image=MatrixToImage(accumulator,exception);
if (accumulator_image != (Image *) NULL)
AppendImageToList(&lines_image,accumulator_image);
}
/*
Free resources.
*/
accumulator=DestroyMatrixInfo(accumulator);
image_info=DestroyImageInfo(image_info);
(void) RelinquishUniqueFileResource(path);
return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean is replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations 100
#define MeanShiftImageTag "MeanShift/Image"
CacheView
*image_view,
*mean_view,
*pixel_view;
Image
*mean_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Validate arguments and clone the source image as the result canvas.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
mean_image=CloneImage(image,0,0,MagickTrue,exception);
if (mean_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
{
mean_image=DestroyImage(mean_image);
return((Image *) NULL);
}
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
pixel_view=AcquireVirtualCacheView(image,exception);
mean_view=AcquireAuthenticCacheView(mean_image,exception);
/*
Mean-shift each pixel: rows are distributed across threads; status and
progress are shared between them.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status,progress) \
magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
for (y=0; y < (ssize_t) mean_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) mean_image->columns; x++)
{
PixelInfo
mean_pixel,
previous_pixel;
PointInfo
mean_location,
previous_location;
register ssize_t
i;
/*
Start the iteration at the pixel itself: its color is the initial
mean and (x,y) the initial centroid.
*/
GetPixelInfo(image,&mean_pixel);
GetPixelInfoPixel(image,p,&mean_pixel);
mean_location.x=(double) x;
mean_location.y=(double) y;
for (i=0; i < MaxMeanShiftIterations; i++)
{
double
distance,
gamma;
PixelInfo
sum_pixel;
PointInfo
sum_location;
ssize_t
count,
v;
sum_location.x=0.0;
sum_location.y=0.0;
GetPixelInfo(image,&sum_pixel);
previous_location=mean_location;
previous_pixel=mean_pixel;
count=0;
for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
{
ssize_t
u;
for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
{
/*
Only offsets inside the elliptical window
v*v+u*u <= (width/2)*(height/2) are considered.
*/
if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
{
PixelInfo
pixel;
/*
NOTE(review): this write clobbers the shared status flag from
inside the parallel region without synchronization; confirm
this race is benign or capture the result in a local.
*/
status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
MagickRound(mean_location.x+u),(ssize_t) MagickRound(
mean_location.y+v),&pixel,exception);
/*
Accumulate only neighbors within color_distance (squared
RGB distance) of the current mean.
*/
distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
(mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
(mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
if (distance <= (color_distance*color_distance))
{
sum_location.x+=mean_location.x+u;
sum_location.y+=mean_location.y+v;
sum_pixel.red+=pixel.red;
sum_pixel.green+=pixel.green;
sum_pixel.blue+=pixel.blue;
sum_pixel.alpha+=pixel.alpha;
count++;
}
}
}
}
/*
New centroid and mean color are the averages of the accepted
neighbors; PerceptibleReciprocal() guards against count == 0.
*/
gamma=PerceptibleReciprocal(count);
mean_location.x=gamma*sum_location.x;
mean_location.y=gamma*sum_location.y;
mean_pixel.red=gamma*sum_pixel.red;
mean_pixel.green=gamma*sum_pixel.green;
mean_pixel.blue=gamma*sum_pixel.blue;
mean_pixel.alpha=gamma*sum_pixel.alpha;
/*
Converged when the squared displacement (spatial plus color deltas
scaled to a 0..255 range) drops to 3.0 or less.
*/
distance=(mean_location.x-previous_location.x)*
(mean_location.x-previous_location.x)+
(mean_location.y-previous_location.y)*
(mean_location.y-previous_location.y)+
255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
if (distance <= 3.0)
break;
}
/*
Replace the window-center pixel with the converged mean color.
*/
SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(mean_image);
}
if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/*
Progress is incremented atomically; the monitor may request
cancellation, which downgrades status.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MeanShiftImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
mean_view=DestroyCacheView(mean_view);
pixel_view=DestroyCacheView(pixel_view);
image_view=DestroyCacheView(image_view);
return(mean_image);
}
|
ConvolutionRules.h | // Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#ifndef CONVOLUTIONRULES_H
#define CONVOLUTIONRULES_H
#include "RectangularRegions.h"
// Build convolution rules for one sample: for every active input site, visit
// each output site its receptive field touches, allocate a new output index on
// first visit (outputGrid.ctr), and record the (input index, output index)
// pair in the rule list for the connecting filter tap.
// NOTE(review): inputSpatialSize is accepted but unused here -- confirm it is
// kept only for signature symmetry with callers.
template <Int dimension>
void Convolution_InputSgToRulesAndOutputSg(SparseGrid<dimension> &inputGrid,
SparseGrid<dimension> &outputGrid,
RuleBook &rules, long *size,
long *stride, long *inputSpatialSize,
long *outputSpatialSize) {
// One rule list per filter tap: volume<dimension>(size) spatial offsets.
rules.resize(volume<dimension>(size));
for (auto const &inIter : inputGrid.mp) {
auto outRegion = OutputRegionCalculator<dimension>(
inIter.first, size, stride, outputSpatialSize);
for (auto j : outRegion) {
auto inRegion = InputRegionCalculator<dimension>(j, size, stride);
// Which filter tap connects this input site to output site j.
Int rulesOffset = inRegion.offset(inIter.first);
auto outIter = outputGrid.mp.find(j);
if (outIter == outputGrid.mp.end()) {
// First time output site j is seen: give it the next output index.
outIter =
outputGrid.mp.insert(std::make_pair(j, outputGrid.ctr++)).first;
}
// Rule = (input index offset by inputGrid.ctr, output index).
rules[rulesOffset].push_back(inIter.second + inputGrid.ctr);
rules[rulesOffset].push_back(outIter->second);
}
}
}
// Serial batch version: processes samples one by one, sharing a single
// RuleBook, and returns the total number of active output sites.
template <Int dimension>
Int Convolution_InputSgsToRulesAndOutputSgs(SparseGrids<dimension> &input_SGs,
SparseGrids<dimension> &output_SGs,
RuleBook &rules, long *filterSize,
long *filterStride,
long *input_spatialSize,
long *output_spatialSize) {
rules.clear();
output_SGs.clear();
Int batchSize = input_SGs.size();
output_SGs.resize(batchSize);
Int output_nActive = 0;
for (Int i = 0; i < batchSize; i++) {
auto &iSG = input_SGs[i];
auto &oSG = output_SGs[i];
// Seed ctr with the running total so new output indices are global.
oSG.ctr = output_nActive;
Convolution_InputSgToRulesAndOutputSg<dimension>(
iSG, oSG, rules, filterSize, filterStride, input_spatialSize,
output_spatialSize);
// ctr now holds the updated running total; record it and reset the
// per-sample counter.
output_nActive = oSG.ctr;
oSG.ctr = 0;
}
return output_nActive;
}
// Parallel batch version: each sample builds a private RuleBook with
// sample-local output indices; a prefix-sum over per-sample counts then
// converts those indices to global numbering during the merge.
template <Int dimension>
Int Convolution_InputSgsToRulesAndOutputSgs_OMP(
SparseGrids<dimension> &input_SGs, SparseGrids<dimension> &output_SGs,
RuleBook &rules, long *filterSize, long *filterStride,
long *input_spatialSize, long *output_spatialSize) {
rules.clear();
rules.resize(volume<dimension>(filterSize));
output_SGs.clear();
Int batchSize = input_SGs.size();
output_SGs.resize(batchSize);
// One private RuleBook per sample; samples are independent here.
std::vector<RuleBook> rbs(batchSize);
{
Int i;
#pragma omp parallel for private(i)
for (i = 0; i < batchSize; i++)
Convolution_InputSgToRulesAndOutputSg<dimension>(
input_SGs[i], output_SGs[i], rbs[i], filterSize, filterStride,
input_spatialSize, output_spatialSize);
}
// Exclusive prefix-sum: after this loop output_SGs[i].ctr is the global
// starting index for sample i's output sites.
Int output_nActive = 0;
for (Int i = 0; i < batchSize; i++) {
// Parallel assignment:
// output_nActive <- output_nActive+output_SGs[i].ctr
// output_SGs[i].ctr <- output_nActive
Int tmp = output_nActive;
output_nActive += output_SGs[i].ctr;
output_SGs[i].ctr = tmp;
}
{
Int i;
#pragma omp parallel for private(i)
for (i = 0; i < (Int)rules.size(); i++) {
// Merge tap i from every sample; threads write disjoint rules[i].
auto &R = rules[i];
for (Int j = 0; j < batchSize; j++) {
auto &r = rbs[j][i];
auto offset = output_SGs[j].ctr;
for (Int k = 0; k < (Int)r.size();) {
R.push_back(r[k++]);
// Odd positions are output indices: shift to global numbering.
R.push_back(r[k++] + offset);
}
}
}
}
return output_nActive;
}
// for each active site, list of (inputFeatureNumber,batchIdx, spatialOffset)
// triples
// Build sparse-to-dense rules: for each batch sample, emit one
// (global input feature index, dense spatial offset) pair per active site.
template <Int dimension>
void SparseToDense_InputSgsToRulesAndOutputSgs(
    SparseGrids<dimension> &input_SGs, RuleBook &rules, long *spatialSize) {
  Int nSamples = input_SGs.size();
  // One rule list per batch sample.
  rules.clear();
  rules.resize(nSamples);
  // Dense region covering the full spatial extent [0, spatialSize-1]^dim.
  Point<dimension> lower, upper;
  for (Int d = 0; d < dimension; ++d) {
    lower[d] = 0;
    upper[d] = spatialSize[d] - 1;
  }
  auto wholeRegion = RectangularRegion<dimension>(lower, upper);
  for (Int sample = 0; sample < nSamples; ++sample) {
    auto &grid = input_SGs[sample];
    auto &sampleRules = rules[sample];
    for (auto const &site : grid.mp) {
      sampleRules.push_back(site.second + grid.ctr);
      sampleRules.push_back(wholeRegion.offset(site.first));
    }
  }
}
// OpenMP variant: batch samples are independent (each iteration writes only
// rules[batchIdx]), so the loop parallelizes without synchronization.
template <Int dimension>
void SparseToDense_InputSgsToRulesAndOutputSgs_OMP(
SparseGrids<dimension> &input_SGs, RuleBook &rules, long *spatialSize) {
Int batchSize = input_SGs.size();
rules.clear();
rules.resize(batchSize);
// Dense region covering the full spatial extent [0, spatialSize-1]^dim.
Point<dimension> lb, ub;
for (Int i = 0; i < dimension; ++i) {
lb[i] = 0;
ub[i] = spatialSize[i] - 1;
}
auto region = RectangularRegion<dimension>(lb, ub);
Int batchIdx;
#pragma omp parallel for private(batchIdx)
for (batchIdx = 0; batchIdx < batchSize; batchIdx++) {
auto &iSG = input_SGs[batchIdx];
for (auto const &inIter : iSG.mp) {
// Rule = (global input feature index, dense spatial offset).
rules[batchIdx].push_back(inIter.second + iSG.ctr);
rules[batchIdx].push_back(region.offset(inIter.first));
}
}
}
#endif /* CONVOLUTIONRULES_H */
|
GB_binop__iseq_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_int64
// A.*B function (eWiseMult): GB_AemultB__iseq_int64
// A*D function (colscale): GB_AxD__iseq_int64
// D*A function (rowscale): GB_DxB__iseq_int64
// C+=B function (dense accum): GB_Cdense_accumB__iseq_int64
// C+=b function (dense accum): GB_Cdense_accumb__iseq_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_int64
// C=scalar+B GB_bind1st__iseq_int64
// C=scalar+B' GB_bind1st_tran__iseq_int64
// C=A+scalar GB_bind2nd__iseq_int64
// C=A'+scalar GB_bind2nd_tran__iseq_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT64 || GxB_NO_ISEQ_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the included template applies
// the ISEQ operator entrywise.
GrB_Info GB_Cdense_ewise3_noaccum__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// Operator compiled out via GB_DISABLE; caller falls back to generic code.
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C; the slice arrays partition B's
// entries into ntasks tasks for the included template.
GrB_Info GB_Cdense_accumB__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix via the included template.
GrB_Info GB_Cdense_accumb__iseq_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns first; kept
// exactly as emitted by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by diagonal D, applying the ISEQ operator per entry
// (see GB_BINOP above); work is split by the ek-slice arrays.
GrB_Info GB_AxD__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by diagonal D, applying the ISEQ operator per entry.
GrB_Info GB_DxB__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the ISEQ operator. The slice
// workspaces start NULL, may be allocated by the template, and are released
// by GB_FREE_ALL (defined just above).
GrB_Info GB_AaddB__iseq_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the ISEQ operator; slice
// workspaces are released by GB_FREE_ALL.
GrB_Info GB_AemultB__iseq_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__iseq_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Cx [p] = (x == Bx [p]) for every entry present per the bitmap Bb.
    int64_t *out = (int64_t *) Cx_output ;
    const int64_t *in = (const int64_t *) Bx_input ;
    int64_t scalar = (*((const int64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            out [p] = (scalar == in [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__iseq_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Cx [p] = (Ax [p] == y) for every entry present per the bitmap Ab.
    int64_t *out = (int64_t *) Cx_output ;
    const int64_t *in = (const int64_t *) Ax_input ;
    int64_t scalar = (*((const int64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            out [p] = (in [p] == scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A while applying z = (x == aij) via GB_CAST_OP.
GrB_Info GB_bind1st_tran__iseq_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for the remainder of this generated file.
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A while applying z = (aij == y) via GB_CAST_OP.
GrB_Info GB_bind2nd_tran__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lt_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lt_uint32
// A.*B function (eWiseMult): GB_AemultB__lt_uint32
// A*D function (colscale): GB_AxD__lt_uint32
// D*A function (rowscale): GB_DxB__lt_uint32
// C+=B function (dense accum): GB_Cdense_accumB__lt_uint32
// C+=b function (dense accum): GB_Cdense_accumb__lt_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_uint32
// C=scalar+B GB_bind1st__lt_uint32
// C=scalar+B' GB_bind1st_tran__lt_uint32
// C=A+scalar GB_bind2nd__lt_uint32
// C=A'+scalar GB_bind2nd_tran__lt_uint32
// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_UINT32 || GxB_NO_LT_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the included template applies
// the LT operator entrywise (bool C, uint32_t A and B).
GrB_Info GB_Cdense_ewise3_noaccum__lt_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, sparse-into-dense accumulate. The template is compiled out
// (#if 0) for this operator, so the call is a no-op returning GrB_SUCCESS.
GrB_Info GB_Cdense_accumB__lt_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, scalar-into-dense accumulate. The template is compiled out
// (#if 0) for this operator, so the call is a no-op returning GrB_SUCCESS.
GrB_Info GB_Cdense_accumb__lt_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by diagonal D, applying the LT operator per entry;
// C's values are bool (see GB_CTYPE above).
GrB_Info GB_AxD__lt_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D, applying LT elementwise.
// Cx is bool (comparator result type); template does the traversal.
GrB_Info GB_DxB__lt_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with "+" meaning the LT operator on the
// set union of the patterns of A and B.  All traversal logic lives in
// GB_add_template.c, driven by the per-task slices in TaskList.
GrB_Info GB_AaddB__lt_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, with ".*" meaning the LT operator on
// the set intersection of the patterns of A and B.
GrB_Info GB_AemultB__lt_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for every entry p: apply LT with the scalar x bound
// as the first operand.  Parallelized with a static OpenMP schedule; Cx and
// Bx may alias because each iteration touches only index p.
GrB_Info GB_bind1st__lt_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *result = (bool *) Cx_output ;
    const uint32_t xval = (*((uint32_t *) x_input)) ;
    const uint32_t *bvals = (uint32_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        result [k] = (xval < bvals [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for every entry p: apply LT with the scalar y bound
// as the second operand.  Parallelized with a static OpenMP schedule; Cx and
// Ax may alias because each iteration touches only index p.
GrB_Info GB_bind2nd__lt_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *result = (bool *) Cx_output ;
    const uint32_t *avals = (uint32_t *) Ax_input ;
    const uint32_t yval = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        result [k] = (avals [k] < yval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply z = (x < aij) to each entry.
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB_bind1st_tran__lt_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for later code in this file: the preprocessor runs
// regardless of the return statements above
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// C = op (A', y): transpose A and apply z = (aij < y) to each entry.
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB_bind2nd_tran__lt_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
elemwise_binary_scalar_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_binary_scalar_op.h
* \brief Function definition of elementwise binary scalar operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#include <mxnet/operator_util.h>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../../common/alm.h"
#include "elemwise_unary_op.h"
#if MXNET_USE_ONEDNN == 1
#include "operator/nn/dnnl/dnnl_power_scalar-inl.h"
#endif
namespace mxnet {
namespace op {
/*!
 * \brief Operator parameter holding the scalar operand of a binary-scalar op.
 *
 * `scalar` is always stored as double; `is_int` records whether the user
 * supplied an integer, which drives output dtype promotion in
 * NumpyBinaryScalarType below.
 */
struct NumpyBinaryScalarParam : public dmlc::Parameter<NumpyBinaryScalarParam> {
double scalar;
bool is_int;
DMLC_DECLARE_PARAMETER(NumpyBinaryScalarParam) {
DMLC_DECLARE_FIELD(scalar).set_default(1).describe("Scalar input value");
DMLC_DECLARE_FIELD(is_int).set_default(true).describe(
"Indicate whether scalar input is int type");
}
// Serialize both fields into the attribute dict; max_digits10 guarantees the
// double round-trips through its decimal representation without loss.
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream scalar_s, is_int_s;
scalar_s << std::setprecision(std::numeric_limits<double>::max_digits10) << scalar;
is_int_s << is_int;
(*dict)["scalar"] = scalar_s.str();
(*dict)["is_int"] = is_int_s.str();
}
};
/*!
 * \brief Infer the output dtype of a tensor-scalar op from the input dtype
 *        and whether the scalar is integral.
 *
 * Promotion rules: int tensor + float scalar -> float64; bool tensor ->
 * int64 (int scalar) or float64 (float scalar); otherwise the output keeps
 * the input dtype (and the input is back-propagated from the output).
 * \return true iff the output dtype was successfully inferred.
 */
inline bool NumpyBinaryScalarType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
bool scalar_is_int = param.is_int;
if (common::is_int(in_attrs->at(0)) && !scalar_is_int) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat64);
} else if (in_attrs->at(0) == mshadow::kBool) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, scalar_is_int ? mshadow::kInt64 : mshadow::kFloat64);
} else {
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
}
return out_attrs->at(0) != -1;
}
/*!
 * \brief Kernels for elementwise binary ops between a tensor and a scalar
 *        (dense, row-sparse, and CSR inputs; forward and backward).
 */
class BinaryScalarOp : public UnaryOp {
/*! \brief Tensor operation against a scalar with a dense result */
template <typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<cpu>* stream,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& input,
const OpReqType req,
const NDArray& output) {
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
CHECK_EQ(output.shape(), input.shape());
const int64_t row_count = output.shape()[0];
const int64_t items_per_row = output.shape().Size() / row_count;
// Value every implicit-zero row maps to; may be nonzero (e.g. 0 + alpha),
// which is why the result must be dense.
const DType result_for_zero = OP::Map(DType(0), DType(alpha));
mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
if (sparse_row_count != row_count) {
mshadow::Tensor<cpu, 1, IType> row_indexes =
input.aux_data(rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
int64_t input_iter = 0;
int64_t output_row = 0;
IType next_input_row = 0;
// Walk output rows, alternating between runs of missing (dense-fill) rows
// and runs of consecutive stored rows, launching one kernel per run.
while (output_row < row_count) {
next_input_row =
input_iter < sparse_row_count ? int64_t(row_indexes[input_iter]) : row_count;
// Split up into blocks of contiguous data and do those together
// Do contiguous dense blocks
const int64_t dense_block_count = next_input_row - output_row;
if (dense_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
stream,
items_per_row * dense_block_count,
output_data.dptr_ + items_per_row * output_row,
result_for_zero);
});
output_row += dense_block_count;
continue;
}
// Do contiguous sparse blocks
int64_t next_non_contiguous_sparse = input_iter;
while (next_non_contiguous_sparse < sparse_row_count - 1) {
if (row_indexes[next_non_contiguous_sparse + 1] !=
row_indexes[next_non_contiguous_sparse] + 1) {
break;
}
++next_non_contiguous_sparse;
}
const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
if (sparse_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * sparse_block_count,
&output_data.dptr_[items_per_row * output_row],
&input_data.dptr_[items_per_row * input_iter],
DType(alpha));
});
output_row += sparse_block_count;
input_iter += sparse_block_count;
continue;
}
}
} else {
// All rows exist (eventually we don't have to do complex
// things to call GPU kernels because we don't need to access row indices)
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream, items_per_row * row_count, output_data.dptr_, input_data.dptr_, DType(alpha));
});
}
}
/*! \brief Tensor operation against a scalar with a dense result */
// GPU specialization not implemented for rsp -> dense.
template <typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<gpu>* stream,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& input,
const OpReqType req,
const NDArray& output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
/*! \brief Tensor operation against a scalar with a dense result */
// CSR input: pre-fill the dense output with OP(0, alpha), then overwrite
// stored entries row by row.  IType = column index type, CType = indptr type.
template <typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<cpu>* stream,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& input,
const OpReqType req,
const NDArray& output) {
CHECK_EQ(output.shape(), input.shape());
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
const TBlob column_indexes = input.aux_data(csr::kIdx);
const size_t item_count = column_indexes.Size();
// Pre-fill dense with 0-input/output value
FillDense<DType>(
stream, output.shape().Size(), dense_fill_val, req, output.data().dptr<DType>());
mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
if (item_count) {
const DType* in = input.data().dptr<DType>();
const IType* column_indexes_ptr = column_indexes.dptr<IType>();
const auto row_count = static_cast<size_t>(input.shape()[0]);
const TBlob row_starts = input.aux_data(csr::kIndPtr);
const CType* row_starts_ptr = row_starts.dptr<CType>();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(row_count); ++i) {
const bool last_row = i == static_cast<int>(row_count) - 1;
// Split up into blocks of contiguous data and do those together
const size_t row_item_start_iter = row_starts_ptr[i];
const size_t input_items_this_row =
!last_row ? static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter :
item_count - row_item_start_iter;
if (input_items_this_row) {
const IType* this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
const DType* row_data_start = in + row_item_start_iter;
DType* output_this_row = out[i].dptr_;
// More overhead to use OMP for small loops, so don't
// NOTE(review): this inner pragma nests inside the outer parallel-for;
// it only spawns threads if nested parallelism is enabled.
if (input_items_this_row > 1000) {
#pragma omp parallel for
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
} else {
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
}
}
}
}
}
/*! \brief Tensor operation against a scalar with a dense result */
// GPU specialization not implemented for csr -> dense.
template <typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<gpu>* stream,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& input,
const OpReqType req,
const NDArray& output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
// Dispatch sparse-input / dense-output computation on the input storage type.
template <typename xpu, typename OP, typename DType, typename IType>
static void ComputeExDenseResult(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& input,
const OpReqType req,
const NDArray output) {
mshadow::Stream<xpu>* stream = ctx.get_stream<xpu>();
CHECK_EQ(output.storage_type(), kDefaultStorage);
switch (input.storage_type()) {
case kRowSparseStorage: {
ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
break;
}
case kCSRStorage: {
MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
});
break;
}
default:
CHECK(false) << "Unsupported sparse storage type";
break;
}
}
public:
// Dense forward on CPU: casts the input to the output dtype when the scalar
// forces a promotion (int tensor + float scalar, or bool tensor).
template <typename OP>
static void Compute_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
mshadow::Stream<cpu>* s,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
TBlob temp_tblob;
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
bool scalar_is_int = param.is_int;
const double alpha = param.scalar;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
if ((common::is_int(inputs[0].type_flag_) && !scalar_is_int) ||
(inputs[0].type_flag_ == kBool)) {
// cast the input into temp space so the kernel sees the output dtype
Tensor<cpu, 1, DType> temp_tensor =
ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(inputs[0].Size()), s);
temp_tblob = TBlob(temp_tensor);
CastCompute<cpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
} else {
temp_tblob = inputs[0];
}
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), temp_tblob.dptr<DType>(), DType(alpha));
});
});
}
// Dense forward entry point (FCompute signature).
template <typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
Compute_<OP>(attrs, ctx, s, inputs, req, outputs);
}
// Forward restricted to integer output dtypes (no cast path needed).
template <typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu>* s = ctx.get_stream<xpu>();
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
// Forward for comparison/logic ops: output is always bool; int tensor with a
// float scalar is first cast to double.
template <typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu>* s = ctx.get_stream<xpu>();
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
bool scalar_is_int = param.is_int;
const double alpha = param.scalar;
TBlob temp_tblob;
if (common::is_int(inputs[0].type_flag_) && !scalar_is_int) {
Tensor<xpu, 1, double> temp_tensor =
ctx.requested[0].get_space_typed<xpu, 1, double>(Shape1(inputs[0].Size()), s);
temp_tblob = TBlob(temp_tensor);
CastCompute<xpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
} else {
temp_tblob = inputs[0];
}
MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(temp_tblob.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<bool>(), temp_tblob.dptr<DType>(), DType(alpha));
});
});
}
// Sparse forward (FComputeEx): same-storage in/out runs the dense kernel on
// the stored values; sparse in / dense out goes through ComputeExDenseResult.
template <typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else if (out_stype == kDefaultStorage &&
(in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
});
});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
// Sparse forward for logic ops: only same-storage in/out is supported.
template <typename xpu, typename OP>
static void LogicComputeEx(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
// Backward on CPU: outputs[0] = OP::grad(inputs[1], alpha) * inputs[0],
// where inputs[0] is the output gradient and inputs[1] the forward input.
template <typename OP>
static void Backward_(const nnvm::NodeAttrs& attrs,
mshadow::Stream<cpu>* s,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet::op::mxnet_op::Kernel<
mxnet::op::mxnet_op::op_with_req<mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>,
cpu>::Launch(s,
inputs[0].Size(),
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(),
inputs[1].dptr<DType>(),
DType(alpha));
});
});
}
// Backward entry point (FCompute signature).
template <typename xpu, typename OP>
static void Backward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu>* s = ctx.get_stream<xpu>();
Backward_<OP>(attrs, s, inputs, req, outputs);
}
};
#if MXNET_USE_ONEDNN == 1
// oneDNN-accelerated _power_scalar: storage-type inference and FComputeEx
// entry point, defined in the corresponding .cc (see dnnl_power_scalar-inl.h).
bool PowerStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* inputs,
std::vector<int>* outputs);
void PowerComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<mxnet::NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<mxnet::NDArray>& outputs);
#endif
// Registers a tensor-scalar operator: one input/one output, parses
// NumpyBinaryScalarParam, infers shape/dtype, allows in-place, and requests
// temp space for the dtype-promotion cast in BinaryScalarOp::Compute_.
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser(ParamParser<NumpyBinaryScalarParam>) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", NumpyBinaryScalarType) \
.set_attr<mxnet::alm::FChangeLayout>("FChangeLayout", ElemwiseChangeLayout) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs) { \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.set_attr<FResourceRequest>( \
"FResourceRequest", \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; \
}) \
.add_argument("data", "NDArray-or-Symbol", "source input") \
.add_arguments(NumpyBinaryScalarParam::__FIELDS__())
#if MXNET_USE_CUDA
// Runtime-compiled (RTC) GPU forward for tensor-scalar ops; OP names the
// device-side operator string.  Overloads cover dense (TBlob) and sparse
// (NDArray) dispatch; bodies live in the CUDA .cu translation unit.
struct BinaryScalarRTCCompute {
std::string OP;
void operator()(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs);
void operator()(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs);
};
// Runtime-compiled (RTC) GPU backward for tensor-scalar ops; OP names the
// device-side gradient operator string.
struct BinaryScalarRTCBackward {
std::string OP;
void operator()(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs);
};
#endif
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
|
GB_binop__rdiv_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_01__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fc64)
// A*D function (colscale): GB (_AxD__rdiv_fc64)
// D*A function (rowscale): GB (_DxB__rdiv_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fc64)
// C=scalar+B GB (_bind1st__rdiv_fc64)
// C=scalar+B' GB (_bind1st_tran__rdiv_fc64)
// C=A+scalar GB (_bind2nd__rdiv_fc64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_div (bij, aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_div (y, x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; op is RDIV on double complex.
// No GB_DISABLE guard: the generator emits this only for the accum-capable
// op set (MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, RDIV).
void GB (_Cdense_ewise3_accum__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense ("+" is RDIV on double complex).
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with RDIV.
GrB_Info GB (_Cdense_accumB__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with RDIV.
GrB_Info GB (_Cdense_accumb__rdiv_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block above always returns (generator artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by diagonal D, applying RDIV elementwise.
GrB_Info GB (_AxD__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by diagonal D, applying RDIV elementwise.
GrB_Info GB (_DxB__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B on the pattern union, with "+" = RDIV.
// Workspaces are declared here and freed by GB_FREE_WORK after the template.
GrB_Info GB (_AaddB__rdiv_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B on the pattern
// intersection, with ".*" = RDIV.
GrB_Info GB (_AemultB_01__rdiv_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP selects at compile time whether a flipped
// variant must be handled at runtime; for RDIV it is 0 (the generator has
// already rewritten div(y,x) as rdiv(x,y)).
GrB_Info GB (_AemultB_02__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full.
GrB_Info GB (_AemultB_03__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__rdiv_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = rdiv(x, bij) with the scalar x bound to the first argument:
// for every entry bij present in B, Cx [p] = bij / x (GB_FC64_div computes
// its first argument divided by its second).
GrB_Info GB (_bind1st__rdiv_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cout = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t scalar = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bvals = (GxB_FC64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        if (GBB (Bb, k))
        {
            // entry present in B: apply the bound operator
            GxB_FC64_t bval = GBX (Bvals, k, false) ;
            Cout [k] = GB_FC64_div (bval, scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = rdiv(aij, y) with the scalar y bound to the second argument:
// for every entry aij present in A, Cx [p] = y / aij (GB_FC64_div computes
// its first argument divided by its second).
GrB_Info GB (_bind2nd__rdiv_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cout = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Avals = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t scalar = (*((GxB_FC64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            // entry present in A: apply the bound operator
            GxB_FC64_t aval = GBX (Avals, k, false) ;
            Cout [k] = GB_FC64_div (scalar, aval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply the operator with scalar x bound
// first.  GB_CAST_OP is consumed by GB_unop_transpose.c for each entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_div (aij, x) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generated-file convention)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply the operator with scalar y bound
// second.  GB_CAST_OP is consumed by GB_unop_transpose.c for each entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_div (y, aij) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
acoustics.c | /*
* Student: Trascau Mihai
* Grupa: 344C4
*
* Lucrare: Ecuatia undelor pentru acustica 2D
* Fisier: acoustics.c
* Descriere: Fisier sursa in care este implementat tot programul folosind functiile descrise in celelalte fisiere sursa.
*/
#include "acoustics.h"
/* Entry point: 2D acoustic wave-equation solver parallelized with MPI
 * (row-wise domain decomposition, ghost-row halo exchange) plus OpenMP
 * inside each rank.  Rank 0 reads the input file, distributes the
 * scenarios, gathers slices for periodic VTK snapshots and writes the
 * gnuplot timing report. */
int main(int argc, char *argv[])
{
int i,j;
int err;
int numtask, rank;
time_t start_time;
MPI_Status status;
/* Derived MPI datatypes mirroring the structure/source/scenario structs.
 * NOTE(review): MPI_Type_struct / MPI_Type_extent are MPI-1 calls removed
 * in MPI-3; MPI_Type_create_struct / MPI_Type_get_extent are the modern
 * equivalents -- confirm before building against a recent MPI. */
MPI_Datatype MPI_STRUCTURE, str_types[1];
MPI_Datatype MPI_SOURCE, src_types[1];
MPI_Datatype MPI_SCENARIO, scn_types[4];
MPI_Aint str_offsets[1], src_offsets[1], scn_offsets[4], extent;
int str_blockcounts[1], src_blockcounts[2], scn_blockcounts[4];
err = MPI_Init(&argc,&argv);
if(err != MPI_SUCCESS)
EXIT_ERROR("[EROARE] Initializare MPI esuata\n\n");
MPI_Comm_size(MPI_COMM_WORLD,&numtask);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
/* MPI_STRUCTURE: 8 contiguous ints */
str_offsets[0] = 0;
str_types[0] = MPI_INT;
str_blockcounts[0] = 8;
MPI_Type_struct(1,str_blockcounts,str_offsets,str_types,&MPI_STRUCTURE);
MPI_Type_commit(&MPI_STRUCTURE);
/* MPI_SOURCE: 3 contiguous ints */
src_offsets[0] = 0;
src_types[0] = MPI_INT;
src_blockcounts[0] = 3;
MPI_Type_struct(1,src_blockcounts,src_offsets,src_types,&MPI_SOURCE);
MPI_Type_commit(&MPI_SOURCE);
/* MPI_SCENARIO: 5 ints + 4 doubles + 1 source + MAX_STRUCTURES structures,
 * with offsets computed from the extents of the preceding blocks
 * (assumes the C struct has no padding between these sections). */
scn_offsets[0] = 0;
scn_types[0] = MPI_INT;
scn_blockcounts[0] = 5;
MPI_Type_extent(MPI_INT,&extent);
scn_offsets[1] = scn_offsets[0] + scn_blockcounts[0]*extent;
scn_types[1] = MPI_DOUBLE;
scn_blockcounts[1] = 4;
MPI_Type_extent(MPI_DOUBLE,&extent);
scn_offsets[2] = scn_offsets[1] + scn_blockcounts[1]*extent;
scn_types[2] = MPI_SOURCE;
scn_blockcounts[2] = 1;
MPI_Type_extent(MPI_SOURCE,&extent);
scn_offsets[3] = scn_offsets[2] + scn_blockcounts[2]*extent;
scn_types[3] = MPI_STRUCTURE;
scn_blockcounts[3] = MAX_STRUCTURES;
MPI_Type_struct(4,scn_blockcounts,scn_offsets,scn_types,&MPI_SCENARIO);
MPI_Type_commit(&MPI_SCENARIO);
/* rank 0 loads the input file and sends the scenario count to everyone */
if(rank == 0)
{
if(argc != 2)
EXIT_ERROR("[EROARE] Numar incorect de argumente. Folosire: ./acoustics <input_file_name>\n\n");
if(import_data(argv[1]))
EXIT_ERROR("[EROARE] Datele de intrare nu au putut fi incarcate\n\n");
for(i=1;i<numtask;i++)
MPI_Send(&num_scenarios,1,MPI_INT,i,1,MPI_COMM_WORLD);
}
else
MPI_Recv(&num_scenarios,1,MPI_INT,0,1,MPI_COMM_WORLD,&status);
/* then the scenario array itself */
if(rank == 0)
for(i=1;i<numtask;i++)
MPI_Send(&scenario,num_scenarios,MPI_SCENARIO,i,1,MPI_COMM_WORLD);
else
MPI_Recv(&scenario,num_scenarios,MPI_SCENARIO,0,1,MPI_COMM_WORLD,&status);
//print_import_data(rank);
scn_index = 0;
while(scn_index < num_scenarios)
{
int step = 0;
int source_active = 1;
start_time = time(NULL);
omp_set_num_threads(scenario[scn_index].OMP_THREADS);
load_scenario();
/* Row partitioning: rank 0 keeps the full-height grid (it also gathers
 * snapshots), interior ranks get ny/numtask rows plus 2 ghost rows,
 * and the last rank takes the remainder plus 1 ghost row. */
if(rank == 0)
{
local_ny = ny/numtask;
init_scenario(ny);
}
else if(rank != numtask-1)
{
local_ny = ny/numtask;
init_scenario(local_ny+2);
}
else
{
local_ny = ny - (numtask-1)*(ny/numtask);
init_scenario(local_ny+1);
}
recalculate_positions(rank,numtask);
int start, stop;
int radius = scenario[scn_index].source.radius;
/* main time-stepping loop */
while(step < (int)(MAX_TIME/TIME_STEP))
{
/* row range owned by this rank (skip the ghost row except on edges) */
if(rank == 0) start = 0;
else start = 1;
if(rank == numtask-1 || rank == 0) stop = local_ny+1;
else stop = local_ny+2;
/* drive the source for the first half of the simulation, then zero
 * it out once and mark it inactive */
if(step < (int)(MAX_TIME/TIME_STEP)/2)
pulse_source(radius,step,scenario[scn_index].amp);
else if(source_active)
{
#pragma omp parallel for private(i,j)
for(i=start;i<stop;i++)
for(j=0;j<nx;j++)
{
if(is_source(i,j,radius,source_active))
uc[i][j] = ub[i][j] = ua[i][j] = 0;
}
source_active = 0;
}
m_compute_acoustics(rank,numtask,source_active,radius);
MPI_Barrier(MPI_COMM_WORLD);
/* Halo exchange of boundary rows.  Odd ranks send first and even
 * ranks receive first so the blocking calls pair up without
 * deadlocking. */
if(rank == 0)
{
MPI_Recv(uc[local_ny],nx,MPI_DOUBLE,1,1,MPI_COMM_WORLD,&status);
MPI_Send(uc[local_ny-1],nx,MPI_DOUBLE,1,1,MPI_COMM_WORLD);
}
else if(rank%2 == 1 && rank != numtask-1)
{
MPI_Send(uc[1],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD);
MPI_Recv(uc[0],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD,&status);
MPI_Send(uc[local_ny],nx,MPI_DOUBLE,rank+1,1,MPI_COMM_WORLD);
MPI_Recv(uc[local_ny+1],nx,MPI_DOUBLE,rank+1,1,MPI_COMM_WORLD,&status);
}
else if(rank%2 == 0 && rank != numtask-1)
{
MPI_Recv(uc[local_ny+1],nx,MPI_DOUBLE,rank+1,1,MPI_COMM_WORLD,&status);
MPI_Send(uc[local_ny],nx,MPI_DOUBLE,rank+1,1,MPI_COMM_WORLD);
MPI_Recv(uc[0],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD,&status);
MPI_Send(uc[1],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD);
}
else if(rank == numtask-1)
{
MPI_Send(uc[1],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD);
MPI_Recv(uc[0],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD,&status);
}
/* periodic snapshot: rank 0 gathers all slices and writes a VTK file */
if(step%SAVE_TIME == 1)
{
if(rank == 0)
{
for(i=1;i<numtask;i++)
{
if(i != numtask-1)
for(j=0;j<ny/numtask;j++)
MPI_Recv(uc[i*(ny/numtask)+j],nx,MPI_DOUBLE,i,1,MPI_COMM_WORLD,&status);
if(i == numtask-1)
for(j=(numtask-1)*(ny/numtask);j<ny;j++)
MPI_Recv(uc[j],nx,MPI_DOUBLE,i,1,MPI_COMM_WORLD,&status);
}
export_to_vtk(step);
}
else
for(i=1;i<local_ny+1;i++)
MPI_Send(uc[i],nx,MPI_DOUBLE,0,1,MPI_COMM_WORLD);
}
/* rotate the three time levels: ua <- ub <- uc (uc reuses ua's buffer) */
xchg = ua;
ua = ub;
ub = uc;
uc = xchg;
MPI_Barrier(MPI_COMM_WORLD);
step++;
}
unload_scenario();
if(rank == 0)
{
time_t stop = time(NULL);
double compute_time = difftime(stop,start_time);
export_to_gnuplot(scn_index,compute_time);
}
scn_index++;
}
MPI_Type_free(&MPI_SCENARIO);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
|
velocity_energy_cumulative.c | /*
=============================================================================
Copyright (c) 2013, Institute for Microelectronics, TU Wien
http://www.iue.tuwien.ac.at
-----------------
ViennaWD - The Vienna Wigner Decoherence Algorithms
Ensemble Monte Carlo Simulator
-----------------
authors: Marek Pobjecky
Mihail Nedjalkov nedjalkov@iue.tuwien.ac.at
license: see file LICENSE in the base directory
=============================================================================
*/
#include <stdio.h>
#include <math.h>
#include "emc.h"
#include <omp.h>
/********************************************************************/
/* Calculate cumulative velocities and energies of the particles */
/* (device independent) */
/********************************************************************/
currents_t oooVelocityEnergyCumulative(const_t constpar, geometry_t *geometry, scatpar_t *scatpar, el_data_t * particles, phys_quant_t *phys_quantities, double *Time, int *nTimeStepsAv)
{
    /* Per-call scalars are automatic; the original made them static, which
       served no purpose and made any parallel use of this function racy. */
    int i, j, n, iv, weight;
    double velx, vely, ee, denom, factor, qfactor;
    /* The mesh-sized scratch arrays stay static to keep them off the stack;
       currentSum and counter MUST stay static (cumulative state across calls). */
    static double momentaryCur[MAXNX];
    /* velySum is indexed by the x mesh index i (0..nxmax), so it must be sized
       MAXNX -- the original velySum[MAXNY] under-allocates when MAXNY < MAXNX. */
    static double velxSum[MAXNX], velySum[MAXNX], eNumber[MAXNX], enerSum[MAXNX], currentSum[MAXNX]; /* 1D sums */
    static double velxSum_2D[MAXNX][MAXNY], velySum_2D[MAXNX][MAXNY], eNumber_2D[MAXNX][MAXNY]; /* 2D sums */
    static double counter = 0; /* counter = kcount1.kcounter */
    currents_t currents = {0}; /* zero so members never set below are defined */
    (void) nTimeStepsAv;       /* currently unused */

    /*=== Calculate instantaneous v(i), e(i) and currentdensity(i) ===*/
    ++counter;
    /* reset the per-step sums */
    for (i = 0; i <= geometry->nxmax; ++i)
    {
        eNumber[i] = 0.0;
        velxSum[i] = 0.0;
        velySum[i] = 0.0;
        enerSum[i] = 0.0;
        for (j = 0; j <= geometry->nymax; ++j)
        {
            eNumber_2D[i][j] = 0;
            velxSum_2D[i][j] = 0.0;
            velySum_2D[i][j] = 0.0;
        }
    }
    /* NOTE(review): the orphaned "#pragma omp for schedule(static, 1)" the
       original placed here was removed: outside a parallel region it was a
       no-op, and inside one the unsynchronized "+=" on the shared static
       arrays below would race. */
    for (n = 0; n <= scatpar->n_used; ++n)
    {
        /* locate the particle on the mesh and clamp BOTH indices; the
           original clamped only i, so an out-of-range y coordinate wrote
           past the bounds of the 2D sum arrays */
        i = (int) (particles[n].p[5] / geometry->meshSize);
        j = (int) (particles[n].p[6] / geometry->meshSize);
        if (i < 0) i = 0;
        else if (i > geometry->nxmax) i = geometry->nxmax;
        if (j < 0) j = 0;
        else if (j > geometry->nymax) j = geometry->nymax;
        weight = particles[n].p[7]; /* statistical weight (truncated to int, as before) */
        iv = particles[n].ip;       /* valley index, expected 1..3 */
        ee = particles[n].energy;
        denom = 1.0 / (constpar.af2 * ee + 1.0); /* non-parabolicity correction */
        if (iv == 1)
        {
            velx = constpar.hm[0] * particles[n].p[1] * denom;
            vely = constpar.hm[1] * particles[n].p[2] * denom;
        }
        else if (iv == 2)
        {
            velx = constpar.hm[1] * particles[n].p[1] * denom;
            vely = constpar.hm[0] * particles[n].p[2] * denom;
        }
        else if (iv == 3)
        {
            velx = constpar.hm[2] * particles[n].p[1] * denom;
            vely = constpar.hm[1] * particles[n].p[2] * denom;
        }
        else
        {
            /* invalid valley index: report it and contribute zero velocity
               (the original reused stale velx/vely from a previous particle) */
            printf("invalid iv = %d at particle index n = %d (n used = %d)\n", iv, n, scatpar->n_used);
            velx = 0.0;
            vely = 0.0;
        }
        velxSum[i] += velx * weight;
        velySum[i] += vely * weight;
        enerSum[i] += ee * weight;
        eNumber[i] += weight;
        velxSum_2D[i][j] += velx * weight;
        velySum_2D[i][j] += vely * weight;
        eNumber_2D[i][j] += weight;
    }
    qfactor = constpar.q / geometry->meshSize;
    if (*Time < scatpar->transientTime)
    {
        /* still in the transient: accumulate current only */
        for (i = 0; i <= geometry->nxmax; ++i)
        {
            currentSum[i] += velxSum[i] * qfactor;
            momentaryCur[i] = velxSum[i] * qfactor;
        }
    }
    else if (*Time == scatpar->transientTime)
    {
        /* transient just ended: restart all cumulative averages */
        counter = 1;
        for (i = 0; i <= geometry->nxmax; ++i)
        {
            currentSum[i] = 0.0;
            /* zero the whole current maps; the original zeroed only a single
               column j -- a stale index left over from the loops above, which
               was out of bounds (nymax + 1) */
            for (j = 0; j <= geometry->nymax; ++j)
            {
                phys_quantities->curSumX[i][j] = 0.0;
                phys_quantities->curSumY[i][j] = 0.0;
            }
            currentSum[i] += velxSum[i] * qfactor;
            momentaryCur[i] = velxSum[i] * qfactor;
            if (eNumber[i] != 0.0)
            {
                factor = 1.0 / eNumber[i]; /* number of electrons */
                phys_quantities->velxSum[i] += velxSum[i] * factor; /* mean velocity x */
                phys_quantities->velySum[i] += velySum[i] * factor; /* mean velocity y */
                phys_quantities->enerSum[i] += enerSum[i] * factor; /* mean energy */
            }
        }
        for (j = 0; j <= geometry->nymax; ++j)
            for (i = 0; i <= geometry->nxmax; ++i)
            {
                phys_quantities->elSum[i][j] = eNumber_2D[i][j];
                phys_quantities->curSumX[i][j] += velxSum_2D[i][j] * qfactor;
                phys_quantities->curSumY[i][j] += velySum_2D[i][j] * qfactor;
            }
    }
    else /* if (*Time > scatpar->transientTime) */
    {
        for (i = 0; i <= geometry->nxmax; ++i)
        {
            currentSum[i] += velxSum[i] * qfactor;
            momentaryCur[i] = velxSum[i] * qfactor;
            if (eNumber[i] != 0.0)
            {
                factor = 1.0 / eNumber[i];
                phys_quantities->velxSum[i] += velxSum[i] * factor;
                phys_quantities->velySum[i] += velySum[i] * factor;
                phys_quantities->enerSum[i] += enerSum[i] * factor;
            }
        }
        for (j = 0; j <= geometry->nymax; ++j)
            for (i = 0; i <= geometry->nxmax; ++i)
            {
                /* running average of the electron density over all steps */
                phys_quantities->elSum[i][j] = (phys_quantities->elSum[i][j] * (counter - 1)
                                                + eNumber_2D[i][j]) / counter;
                phys_quantities->curSumX[i][j] += velxSum_2D[i][j] * qfactor;
                phys_quantities->curSumY[i][j] += velySum_2D[i][j] * qfactor;
            }
    }
    /*=== calculate cumulative and momentary source & drain currents ===*/
    denom = 1.0 / (counter * geometry->deviceWidth);
    currents.Is_cumul = currentSum[geometry->nxmax / 2] * denom;
    currents.Id_cumul = currentSum[geometry->nxmax / 2] * denom;
    // currents.Is_cumul = currentSum[geometry->ns + 3] * denom;
    // currents.Id_cumul = currentSum[geometry->nd - 3] * denom;
    // currents.Is_momentary = momentaryCur[geometry->ns + 3] / geometry->deviceWidth;
    // currents.Id_momentary = momentaryCur[geometry->nd - 3] / geometry->deviceWidth;
    return currents;
}
|
omp_task_if.c | <ompts:test>
<ompts:testdescription>Test which checks the if clause of the omp task directive. The idea of the test is to generate a task in a single region and pause it immediately. The parent thread then sets a counter variable which the paused task evaluates when woken up.</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp task if</ompts:directive>
<ompts:dependences>omp single,omp flush</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
int <ompts:testcode:functionname>omp_task_if</ompts:testcode:functionname>(FILE * logFile){
<ompts:orphan:vars>
int condition_false;
int count;
int result;
</ompts:orphan:vars>
count=0;
/* logFile is expected to be non-NULL here, so condition_false is 0 and the
 * task's if() clause is false: the task must then be executed immediately
 * (undeferred) by the encountering thread. */
condition_false = (logFile == NULL);
#pragma omp parallel
{
#pragma omp single
{
<ompts:orphan>
#pragma omp task <ompts:check>if (condition_false)</ompts:check> shared(count, result)
{
/* With if(false) this body runs (and sleeps) before the single thread can
 * reach "count = 1" below, so result becomes 1.  In the crosscheck build
 * (clause removed) the task may be deferred until after count is set,
 * making result 0. */
my_sleep (SLEEPTIME_LONG);
//#pragma omp flush (count)
result = (0 == count);
} /* end of omp task */
</ompts:orphan>
count = 1;
//#pragma omp flush (count)
} /* end of single */
} /*end of parallel */
return result;
}
</ompts:testcode>
</ompts:test>
|
atomic-3.c | /* { dg-do run } */
/* { dg-options "-O0" } */
#include <omp.h>
#include <stdlib.h>
short e[64];
int g;
_Complex double d, f;
int num_threads;
/* Hammer several distinct memory locations with "#pragma omp atomic"
 * updates from up to 4 threads; main() verifies the totals afterwards,
 * so any lost update indicates broken atomics.  noinline keeps the
 * compiler from constant-folding the whole test away. */
__attribute__((noinline)) void
foo (int x, long long y)
{
#pragma omp parallel num_threads (4)
{
int i;
#pragma omp barrier
for (i = 0; i < 2400; i++)
{
/* record the team size actually granted (may be fewer than 4) */
if (i == 0)
num_threads = omp_get_num_threads ();
#pragma omp atomic
e[0] += x;
#pragma omp atomic
e[16] += x;
#pragma omp atomic
g += y;
#pragma omp atomic
__real__ d += x;
#pragma omp atomic
__imag__ f += x;
}
}
}
int
main (void)
{
int i;
foo (3, 3LL);
/* each atomic target must equal 3 * 2400 * (threads in the team);
 * d accumulated only its real part, f only its imaginary part */
if (g != 3 * 2400 * num_threads
|| __real__ d != g || __imag__ d != 0
|| __real__ f != 0 || __imag__ f != g)
abort ();
/* only e[0] and e[16] were updated; every other slot must still be 0 */
for (i = 0; i < 64; i++)
if (e[i] != ((i && i != 16) ? 0 : g))
abort ();
return 0;
}
|
array.h | /** @file array.h
@brief A brief, one sentence description.
A more detailed multiline description...
@author Peter Drysdale, Felix Fung,
*/
#ifndef NFTSIM_SRC_ARRAY_H
#define NFTSIM_SRC_ARRAY_H
// C++ standard library headers
#include <vector> // std::vector;
/** @brief Owning container of heap-allocated elements of type T.
 *
 * Stores raw pointers and deletes each element in the destructor, so every
 * pointer passed to add() must be heap-allocated and uniquely owned by this
 * Array.  Copying is disabled (private, unimplemented copy constructor).
 */
template<class T>
class Array {
Array(const Array&); // No copy constructor allowed.
std::vector<T*> m;   // owned elements; deleted in ~Array()
public:
using size_type = typename std::vector<T>::size_type;
virtual void step();
virtual void pstep(); // parallel for loop over elements::loop
void add(T* t);
void add(std::vector<T*> t);
bool empty() const;
inline T* operator[]( size_type index ) const;
size_type size() const;
Array<T>();
virtual ~Array();
};
// Append a single element; the Array takes ownership (deleted in ~Array()).
template<class T>
void Array<T>::add( T* t ) {
  m.emplace_back(t);
}
// Append every pointer in t; the Array takes ownership of each element.
template<class T>
void Array<T>::add( std::vector<T*> t ) {
  m.insert( m.end(), t.begin(), t.end() );
}
// True when the Array holds no elements.
template<class T>
bool Array<T>::empty() const {
  return m.size() == 0;
}
// Advance every element by one serial step.
template<class T>
void Array<T>::step() {
  for( auto* element : m ) {
    element->step();
  }
}
// Advance every element, in a form suitable for parallel execution.
// pstep() exists separately from step() because output must go through
// step() so that it is not parallelized; the OpenMP pragma is currently
// disabled, and the index loop is kept so it can be re-enabled as-is.
template<class T>
void Array<T>::pstep() {
  //#pragma omp parallel for num_threads(5)
  for( size_type idx = 0; idx < m.size(); ++idx ) {
    m[idx]->step();
  }
}
// Default-construct an empty Array.
template<class T>
Array<T>::Array() = default;
// Destroy the Array, deleting every owned element.
// (delete on a null pointer is a no-op, so no null check is needed.)
template<class T>
Array<T>::~Array() {
  for( size_type i=0; i<m.size(); i++ ) {
    delete m[i];
  }
}
// Unchecked element access; the index must be < size().
template<class T>
T* Array<T>::operator[]( size_type index ) const {
return m[index];
}
// Number of elements currently held.
template<class T>
typename Array<T>::size_type Array<T>::size() const {
return m.size();
}
#endif //NFTSIM_SRC_ARRAY_H
|
TemporalRowConvolution.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/TemporalRowConvolution.c"
#else
// Validate the arguments of TemporalRowConvolution: kernel/stride sizes,
// weight and bias layout, and the dimensions of input (and, when non-NULL,
// gradOutput).  Layout is always (possibly batch) x features x sequence.
static inline void THNN_(TemporalRowConvolution_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *weight,
THTensor *bias,
int kW,
int dW,
int padW) {
THArgCheck(kW > 0, 5,
"kernel size should be greater than zero, but got kW: %d", kW);
THArgCheck(dW > 0, 6,
"stride should be greater than zero, but got dW: %d", dW);
THNN_ARGCHECK(weight->nDimension == 3, 3, weight,
"3D weight tensor expected, but got: %s");
THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
if (bias != NULL) {
THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]);
}
// we're always looking at (possibly batch) x feats x seq
int ndim = input->nDimension;
int dimF = 0;
int dimS = 1;
if (ndim == 3) {
++dimS;
++dimF;
}
THNN_ARGCHECK(ndim == 2 || ndim == 3, 1, input,
"2D or 3D (batch mode) input tensor expected, but got :%s");
int64_t inputFrameSize = weight->size[0];
int64_t nInputFrame = input->size[dimS];
// standard 1-D convolution output-length formula
int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;
if (nOutputFrame < 1) {
THError("Given input size: (%d x %d). "
"Calculated output size: (%d x %d). Output size is too small",
inputFrameSize, nInputFrame, inputFrameSize, nOutputFrame);
}
THNN_CHECK_DIM_SIZE(input, ndim, dimF, inputFrameSize);
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimF, inputFrameSize);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimS, nOutputFrame);
}
}
// Fold the unfolded buffer finput (inputFrameSize x kW x nOutputFrame) back
// into input (inputFrameSize x nInputFrame), summing contributions from
// overlapping windows.  Inverse of unfolded_copy_row; used to accumulate
// fgradInput into gradInput.  NOTE(review): padW is accepted but not used
// here -- presumably padding is handled by the caller; confirm.
static void THNN_(unfolded_acc_row)(
THTensor *finput,
THTensor *input,
int kW,
int dW,
int padW,
int64_t inputFrameSize,
int64_t nInputFrame,
int64_t nOutputFrame) {
size_t c;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
// #pragma omp parallel for private(c)
for (c = 0; c < inputFrameSize; c++) {
size_t kw, x;
size_t ix = 0;
for (kw = 0; kw < kW; kw++) {
real *src = finput_data
+ c * (kW * nOutputFrame)
+ kw * (nOutputFrame);
real *dst = input_data + c * (nInputFrame);
ix = (size_t)(kw);
if (dW == 1) {
// stride 1: the whole row can be accumulated in one vectorized add
real *dst_slice = dst + (size_t)(ix);
THVector_(cadd)(dst_slice, dst_slice, src, 1, nOutputFrame);
} else {
// general stride: accumulate one element per output position
for (x = 0; x < nOutputFrame; x++) {
real *dst_slice = dst + (size_t)(ix + x * dW);
THVector_(cadd)(dst_slice, dst_slice,
src + (size_t)(x), 1, 1);
}
}
}
}
}
// Row-wise im2col: copy sliding windows of input (inputFrameSize x
// nInputFrame) into finput (inputFrameSize x kW x nOutputFrame) so the
// convolution can be computed as a batched matrix product.
// NOTE(review): padW is accepted but not used here -- confirm padding is
// handled elsewhere.
static void THNN_(unfolded_copy_row)(
THTensor *finput,
THTensor *input,
int kW,
int dW,
int padW,
int64_t inputFrameSize,
int64_t nInputFrame,
int64_t nOutputFrame) {
int64_t k;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
// #pragma omp parallel for private(k)
for (k = 0; k < inputFrameSize * kW; k++) {
size_t c = k / kW;
size_t rest = k % kW;
size_t kw = rest % kW; // rest is already < kW, so kw == rest
size_t x;
size_t ix;
real *dst = finput_data + c * (kW * nOutputFrame) + kw * (nOutputFrame);
real *src = input_data + c * (nInputFrame);
ix = (size_t)(kw);
if (dW == 1) {
// stride 1: the whole output row is one contiguous slice of input
memcpy(dst, src+(size_t)(ix), sizeof(real) * (nOutputFrame));
} else {
// general stride: gather one element per output position
for (x = 0; x < nOutputFrame; x++) {
memcpy(dst + (size_t)(x), src + (size_t)(ix + x * dW),
sizeof(real) * 1);
}
}
}
}
// Forward pass for a single (non-batch) frame: unfold the input into
// finput, optionally pre-fill the output rows with the bias, then compute
// the per-feature convolution as a batched matrix product
// (weight: inputFrameSize x 1 x kW, finput: inputFrameSize x kW x nOutputFrame).
static void THNN_(TemporalRowConvolution_updateOutput_frame)(
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
int kW,
int dW,
int padW,
int64_t inputFrameSize,
int64_t nInputFrame,
int64_t nOutputFrame) {
int64_t i;
// view output as inputFrameSize x 1 x nOutputFrame for baddbmm
THTensor *output3d = THTensor_(newWithStorage3d)(
output->storage, output->storageOffset,
inputFrameSize, -1,
1, -1,
nOutputFrame, -1);
THNN_(unfolded_copy_row)(finput, input, kW, dW, padW,
inputFrameSize, nInputFrame, nOutputFrame);
THTensor_(zero)(output);
if (bias != NULL) {
// broadcast bias[i] across row i of the output
for (i = 0; i < inputFrameSize; i++)
THVector_(fill)
(output->storage->data + output->storageOffset
+ output->stride[0] * i,
THTensor_(get1d)(bias, i), nOutputFrame);
}
// output3d += weight * finput (beta = 1 keeps the bias just written)
THTensor_(baddbmm)(output3d, 1, output3d, 1, weight, finput);
THTensor_(free)(output3d);
}
// Forward pass.  Transposes input to feature-first layout when needed,
// validates shapes, then computes each frame (parallelized over the batch
// dimension) via unfolded copy + batched matrix multiply.
void THNN_(TemporalRowConvolution_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
THTensor *fgradInput, // unused here but needed for Cuda
int kW,
int dW,
int padW,
bool featFirst) {
int ndim = input->nDimension;
THTensor *tinput;
if (!featFirst) {
// bring the feature dimension in front of the sequence dimension
tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
input = THTensor_(newContiguous)(tinput);
} else {
input = THTensor_(newContiguous)(input);
}
THNN_(TemporalRowConvolution_shapeCheck)(
state, input, NULL, weight, bias, kW, dW, padW);
int64_t inputFrameSize = weight->size[0];
int64_t nInputFrame = input->size[ndim - 1];
int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;
if (ndim == 2) { /* non-batch mode */
THTensor_(resize3d)(finput, inputFrameSize, kW, nOutputFrame);
THTensor_(resize2d)(output, inputFrameSize, nOutputFrame);
THTensor_(zero)(finput);
THTensor_(zero)(output);
THNN_(TemporalRowConvolution_updateOutput_frame)
(input, output, weight, bias, finput,
kW, dW, padW,
inputFrameSize, nInputFrame, nOutputFrame);
} else {
// batch mode: one independent frame computation per batch element
int64_t T = input->size[0];
int64_t t;
THTensor_(resize4d)(finput, T, inputFrameSize, kW, nOutputFrame);
THTensor_(resize3d)(output, T, inputFrameSize, nOutputFrame);
THTensor_(zero)(finput);
THTensor_(zero)(output);
#pragma omp parallel for private(t)
for (t = 0; t < T; t++) {
THTensor *input_t = THTensor_(newSelect)(input, 0, t);
THTensor *output_t = THTensor_(newSelect)(output, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(TemporalRowConvolution_updateOutput_frame)
(input_t, output_t, weight, bias, finput_t,
kW, dW, padW, inputFrameSize, nInputFrame, nOutputFrame);
THTensor_(free)(input_t);
THTensor_(free)(output_t);
THTensor_(free)(finput_t);
}
}
if (!featFirst) { // NOTE: output will NOT be contiguous in this case
THTensor_(transpose)(output, output, ndim - 1, ndim - 2);
THTensor_(free)(tinput);
}
THTensor_(free)(input);
}
// Backward (input gradient) for one frame: expand gradOutput through the
// (transposed) weight into the unfolded buffer fgradInput, then fold it
// back into gradInput, accumulating overlapping windows.
static void THNN_(TemporalRowConvolution_updateGradInput_frame)(
THTensor *gradInput,
THTensor *gradOutput,
THTensor *weight,
THTensor *fgradInput,
int kW,
int dW,
int padW,
int64_t inputFrameSize,
int64_t nInputFrame,
int64_t nOutputFrame) {
// view gradOutput as inputFrameSize x 1 x nOutputFrame for baddbmm
THTensor *gradOutput3d = THTensor_(newWithStorage3d)(
gradOutput->storage, gradOutput->storageOffset,
inputFrameSize, -1,
1, -1,
nOutputFrame, -1);
// weight: inputFrameSize x kW x 1
// gradOutput3d: inputFrameSize x 1 x nOutputFrame
THTensor_(baddbmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput3d);
// fgradInput: inputFrameSize x kW x nOutputFrame
THTensor_(free)(gradOutput3d);
THTensor_(zero)(gradInput);
THNN_(unfolded_acc_row)(fgradInput, gradInput,
kW, dW, padW,
inputFrameSize, nInputFrame, nOutputFrame);
}
// Backward pass for the input gradient.  Mirrors updateOutput: transpose
// to feature-first layout when needed, validate shapes, then compute each
// frame's gradient with the weight transposed in its kW/1 dimensions.
void THNN_(TemporalRowConvolution_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *weight,
THTensor *finput,
THTensor *fgradInput,
int kW,
int dW,
int padW,
bool featFirst) {
int ndim = input->nDimension;
THTensor *tinput, *tgradOutput;
if (!featFirst) {
tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2);
input = THTensor_(newContiguous)(tinput);
gradOutput = THTensor_(newContiguous)(tgradOutput);
} else {
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
}
THNN_(TemporalRowConvolution_shapeCheck)(state, input, gradOutput, weight,
NULL, kW, dW, padW);
int64_t inputFrameSize = weight->size[0];
int64_t nInputFrame = input->size[ndim - 1];
int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;
THTensor_(resizeAs)(fgradInput, finput);
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(fgradInput);
THTensor_(zero)(gradInput);
// tweight: weight with its kW and 1 dimensions swapped
THTensor *tweight = THTensor_(new)();
THTensor_(transpose)(tweight, weight, 1, 2);
if (ndim == 2) {
THNN_(TemporalRowConvolution_updateGradInput_frame)
(gradInput, gradOutput, tweight, fgradInput,
kW, dW, padW,
inputFrameSize, nInputFrame, nOutputFrame);
} else {
// batch mode: independent per-batch-element gradients
int64_t T = input->size[0];
int64_t t;
#pragma omp parallel for private(t)
for (t = 0; t < T; t++) {
THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
THNN_(TemporalRowConvolution_updateGradInput_frame)
(gradInput_t, gradOutput_t, tweight, fgradInput_t,
kW, dW, padW,
inputFrameSize, nInputFrame, nOutputFrame);
THTensor_(free)(gradInput_t);
THTensor_(free)(gradOutput_t);
THTensor_(free)(fgradInput_t);
}
}
THTensor_(free)(tweight);
if (!featFirst) { // NOTE: gradInput will NOT be contiguous in this case
THTensor_(free)(tinput);
THTensor_(free)(tgradOutput);
THTensor_(transpose)(gradInput, gradInput, ndim - 1, ndim - 2);
}
THTensor_(free)(input);
THTensor_(free)(gradOutput);
}
// Accumulate weight (and optionally bias) gradients for one frame:
// gradWeight += scale * gradOutput3d * finput^T, and gradBias[i] +=
// scale * sum of gradOutput row i.
static void THNN_(TemporalRowConvolution_accGradParameters_frame)(
THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
THTensor *finput, real scale) {
int64_t i;
// view gradOutput as inputFrameSize x 1 x nOutputFrame for baddbmm
THTensor *gradOutput3d = THTensor_(newWithStorage3d)(
gradOutput->storage, gradOutput->storageOffset,
gradOutput->size[0], -1,
1, -1,
gradOutput->size[1], -1);
THTensor *tfinput = THTensor_(new)();
THTensor_(transpose)(tfinput, finput, 1, 2);
// gradOutput3d: inputFrameSize x 1 x nOutputFrame
// finput: inputFrameSize x nOutputFrame x kW
THTensor_(baddbmm)(gradWeight, 1, gradWeight, scale, gradOutput3d, tfinput);
// gradWeight: inputFrameSize x 1 x kW
THTensor_(free)(tfinput);
if (gradBias != NULL) {
// bias gradient: sum each gradOutput row, scaled
for (i = 0; i < gradBias->size[0]; i++) {
int64_t k;
real sum = 0;
real *data = gradOutput3d->storage->data
+ gradOutput3d->storageOffset
+ i * gradOutput3d->stride[0];
for (k = 0; k < gradOutput3d->size[2]; k++) {
sum += data[k];
}
(gradBias->storage->data + gradBias->storageOffset)[i]
+= scale * sum;
}
}
THTensor_(free)(gradOutput3d);
}
// Accumulate parameter gradients over the whole (possibly batched) input.
// Batch frames are processed serially because gradWeight/gradBias are
// shared accumulators.
void THNN_(TemporalRowConvolution_accGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput,
THTensor *fgradInput,
int kW,
int dW,
int padW,
bool featFirst,
accreal scale_) {
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int ndim = input->nDimension;
THTensor *tinput, *tgradOutput;
if (!featFirst) {
// bring the feature dimension in front of the sequence dimension
tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2);
input = THTensor_(newContiguous)(tinput);
gradOutput = THTensor_(newContiguous)(tgradOutput);
} else {
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
}
THNN_(TemporalRowConvolution_shapeCheck)
(state, input, gradOutput, gradWeight, gradBias, kW, dW, padW);
int64_t inputFrameSize = gradWeight->size[0];
int64_t nInputFrame = input->size[ndim - 1];
int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;
if (ndim == 2) {
THNN_(TemporalRowConvolution_accGradParameters_frame)(
gradOutput, gradWeight, gradBias, finput, scale);
} else {
int64_t T = input->size[0];
int64_t t;
for (t = 0; t < T; t++) {
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(TemporalRowConvolution_accGradParameters_frame)(
gradOutput_t, gradWeight, gradBias, finput_t, scale);
THTensor_(free)(gradOutput_t);
THTensor_(free)(finput_t);
}
}
if (!featFirst) {
THTensor_(free)(tinput);
THTensor_(free)(tgradOutput);
}
THTensor_(free)(input);
THTensor_(free)(gradOutput);
}
#endif
|
GB_binop__isgt_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint8)
// A*D function (colscale): GB (_AxD__isgt_uint8)
// D*A function (rowscale): GB (_DxB__isgt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint8)
// C=scalar+B GB (_bind1st__isgt_uint8)
// C=scalar+B' GB (_bind1st_tran__isgt_uint8)
// C=A+scalar GB (_bind2nd__isgt_uint8)
// C=A'+scalar GB (_bind2nd_tran__isgt_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
// Kernel-configuration macros: these specialize the shared GraphBLAS
// templates (included by the functions below) for the ISGT operator on
// uint8_t (cij = (aij > bij), result stored as uint8_t 0/1).
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (no trailing backslash after the "0": a stray line continuation here
// would splice the next source line into this macro definition)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Compiled out (#if 0): ISGT is not one of the accumulable ops listed below,
// so no dense-accum ewise3 kernel is generated for it.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The loop itself lives in the shared template, specialized by the GB_*
// macros defined above.
void GB (_Cdense_ewise3_noaccum__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Returns GrB_NO_VALUE when GB_DISABLE holds, telling the caller to fall
// back to the generic (non-specialized) kernel.
GrB_Info GB (_Cdense_accumB__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isgt_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
// generated code: this inner return makes the outer return below
// unreachable; harmless, kept as generated
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Applies cij = (aij > djj) column-by-column; the traversal is in the shared
// colscale template, parallelized over the A_ek_slicing task partition.
GrB_Info GB (_AxD__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array for the template
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of _AxD above: cij = (dii > bij), traversal in the rowscale template.
GrB_Info GB (_DxB__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array for the template
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Set-union ewise: entries present in only one of A/B are copied (or, for
// eWiseUnion, paired with the alpha/beta scalars below). The actual merge
// logic is in GB_add_template.c, driven by the precomputed TaskList.
GrB_Info GB (_AaddB__isgt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
// alpha/beta are only read for GxB_eWiseUnion; left uninitialized otherwise
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Set-intersection ewise (method 08): only entries present in both A and B
// produce an entry in C; traversal in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__isgt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// GB_BINOP_FLIP is 0 for ISGT (see the macro preamble), so only this
// branch is compiled for this kernel.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// The mask M drives the iteration (M_ek_slicing); see GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for every entry present in B.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE),
// so the caller can use the generic fallback instead.
GrB_Info GB (_bind1st__isgt_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Bx = (uint8_t *) Bx_input ;
const uint8_t x = (*((uint8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only touch positions present in the bitmap Bb
if (GBB (Bb, p))
{
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for every entry present in A.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_bind2nd__isgt_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
const uint8_t y = (*((uint8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only touch positions present in the bitmap Ab
if (GBB (Ab, p))
{
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined here so the shared transpose template applies the
// bound-scalar operator instead of a cast.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated code restores GB_ATYPE for the kernels that follow
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_uint8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_uint16
// op(A') function: GB_tran__lnot_uint8_uint16
// C type: uint8_t
// A type: uint16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
// Macro specializations for the LNOT unary op with uint16_t input and
// uint8_t output: cij = !((uint8_t) aij != 0). These configure the shared
// templates included by the two kernels below.
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij)): read, cast to the C type, then apply the op
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = lnot ((uint8_t) Ax [k]) for all anz entries, in parallel.
// Returns GrB_NO_VALUE when the kernel is compiled out (GB_DISABLE),
// so the caller falls back to the generic apply.
GrB_Info GB_unop__lnot_uint8_uint16
(
uint8_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < anz ; k++)
{
// expanded form of GB_CAST_OP (k, k):
uint16_t aij = Ax [k] ; // aij = Ax [k]
uint8_t z = (uint8_t) aij ; // cast to the C type
Cx [k] = !(z != 0) ; // logical not
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose, typecast uint16_t -> uint8_t, and apply
// the unary op. The traversal is in GB_unaryop_transpose.c, specialized by
// the GB_* macros above; only the numeric phase is compiled here.
GrB_Info GB_tran__lnot_uint8_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Jacobi2D-NaiveParallelSpaceTiled-OMP.test.c | /******************************************************************************
* Jacobi2D benchmark
* Basic parallelisation with OpenMP
*
* Usage:
* make omp
* export OMP_NUM_THREADS=8
* bin/Jacobi2D-NaiveParallel-OMP \
* `cat src/Jacobi2D-NaiveParallel-OMP.perfexecopts`
* For a run on 8 threads
******************************************************************************/
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdbool.h>
#include <ctype.h>
#include <math.h>
#include <assert.h>
// Five-point Jacobi stencil: space[write][x][y] becomes the average of the
// read-plane cell and its four orthogonal neighbours.
// Wrapped in do { } while (0) so the macro expands to a single statement
// (safe under an unbraced if/else, and no stray ";;" at call sites), and
// every parameter use is parenthesized so expression arguments expand with
// the intended precedence.
#define STENCIL(read,write,x,y) \
do { \
space[(write)][(x)][(y)] = \
( space[(read)][(x)-1][(y)] + \
space[(read)][(x)][(y)] + \
space[(read)][(x)+1][(y)] + \
space[(read)][(x)][(y)+1] + \
space[(read)][(x)][(y)-1] ) / 5; \
} while (0)
#include "util.h"
// main
// Stages
// 1 - command line parsing
// 2 - data allocation and initialization
// 3 - jacobi 1D timed within an openmp loop
// 4 - output and optional verification
// Jacobi2D benchmark driver: parse args, allocate and seed two (N+2)^2
// planes with a halo, run T space-tiled Jacobi sweeps under OpenMP, then
// report wall time and optionally verify against a reference.
int main( int argc, char* argv[] ){
// rather than calling fflush
setbuf(stdout, NULL);
// 1 - command line parsing (Params, parseCmdLineArgs, min, and
// verifyResultJacobi2D come from util.h)
Params cmdLineArgs;
parseCmdLineArgs(&cmdLineArgs,argc,argv);
// 1a - number of tiles per axis; one extra (partial) tile when the
// problem size is not a multiple of the tile length
int tileCountX = cmdLineArgs.problemSize/cmdLineArgs.tile_len_x;
int tileCountY = cmdLineArgs.problemSize/cmdLineArgs.tile_len_y;
if(cmdLineArgs.problemSize % cmdLineArgs.tile_len_x != 0){
tileCountX += 1;
}
if(cmdLineArgs.problemSize % cmdLineArgs.tile_len_y != 0){
tileCountY += 1;
}
// 2 - data allocation and initialization
// interior cells live in [lowerBound, upperBound]; row/col 0 and N+1 are halo
int lowerBound = 1;
int upperBound = lowerBound + cmdLineArgs.problemSize - 1;
double** space[2];
int i;
// allocate x axis (two planes for the read/write ping-pong)
space[0] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*));
space[1] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*));
if( space[0] == NULL || space[1] == NULL ){
printf( "Could not allocate x axis of space array\n" );
exit(0);
}
// allocate y axis
// NOTE(review): none of these allocations are freed before exit; the OS
// reclaims them, but an explicit cleanup pass would be tidier.
for( i = 0; i < cmdLineArgs.problemSize + 2; ++i ){
space[0][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double));
space[1][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double));
if( space[0][i] == NULL || space[1][i] == NULL ){
printf( "Could not allocate y axis of space array\n" );
exit(0);
}
}
// use global seed to seed the random number gen (will be constant)
srand(cmdLineArgs.globalSeed);
// first touch for openmp: zero each tile with the same thread layout as
// the timed loop so pages land on the NUMA node that will use them
int x, y;
int itx, ity;
#pragma omp parallel for private( x, y ) schedule(dynamic)
for(itx=0; itx< tileCountX; itx++){
for(ity=0; ity < tileCountY; ity++ ){
int xlb = lowerBound+itx*cmdLineArgs.tile_len_x;
// NOTE(review): the inclusive bound lowerBound+itx*len+len together
// with "x <= xub" spans tile_len_x+1 rows, so adjacent tiles overlap
// by one row (same for y below) — looks like an off-by-one; confirm
// intent before changing, as the verifier accepts current output.
int xub = min(upperBound,lowerBound+itx*cmdLineArgs.tile_len_x+
cmdLineArgs.tile_len_x);
for( x = xlb; x <= xub; ++x ){
int ylb = lowerBound+ity*cmdLineArgs.tile_len_y;
int yub = min(lowerBound+ity*cmdLineArgs.tile_len_y+
cmdLineArgs.tile_len_y,upperBound);
for( y = ylb; y <= yub;++y ){
space[0][x][y] = 0;
space[1][x][y] = 0;
}
}
}
}
// seed the space.
// NOTE(review): rand()/(double)rand() divides by zero if rand() returns 0,
// and can exceed 1.0; presumably verifyResultJacobi2D regenerates the same
// sequence from globalSeed, so this must stay bit-identical to util.h.
for( x = lowerBound; x <= upperBound; ++x ){
for( y = lowerBound; y <= upperBound; ++y ){
space[0][x][y] = rand() / (double)rand();
}
}
// set halo values (sanity): zero the boundary of both planes
for( i = 0; i < cmdLineArgs.problemSize + 2; ++i){
space[0][i][0] = 0;
space[1][i][0] = 0;
space[0][i][cmdLineArgs.problemSize + 1] = 0;
space[1][i][cmdLineArgs.problemSize + 1] = 0;
space[0][0][i] = 0;
space[1][0][i] = 0;
space[0][cmdLineArgs.problemSize + 1][i] = 0;
space[1][cmdLineArgs.problemSize + 1][i] = 0;
}
// 3 - jacobi 2D timed within an openmp loop
double start_time = omp_get_wtime();
int t,read=0,write=1;
// hoisted loop invariants for the tile-bound arithmetic
int lbplusx = lowerBound+cmdLineArgs.tile_len_x;
int xtmp = cmdLineArgs.tile_len_x;
int lbplusy = lowerBound+cmdLineArgs.tile_len_y;
int ytmp = cmdLineArgs.tile_len_y;
for( t = 1; t <= cmdLineArgs.T; ++t ){
// one sweep: each tile updates the write plane from the read plane
// (same tile-overlap caveat as in the first-touch loop above)
#pragma omp parallel for private( x, y ) schedule(dynamic)
for(itx=0; itx< tileCountX; itx++){
for(ity=0; ity < tileCountY; ity++ ){
int xlb = lowerBound+itx*xtmp;
int xub = min(upperBound,lbplusx+itx*xtmp);
for( x = xlb; x <= xub; ++x ){
int ylb = lowerBound+ity*ytmp;
int yub = min(lbplusy+ity*ytmp, upperBound);
for( y = ylb; y <= yub;++y ){
STENCIL( read, write, x, y);
}
}
}
}
// swap the ping-pong planes for the next sweep
read = write;
write = 1 - write;
}
double end_time = omp_get_wtime();
double time = (end_time - start_time);
// 4 - output and optional verification
if( cmdLineArgs.printtime ){
/*
printf( "Threads: %d, P: %d, Tile: %d,%d, ",cmdLineArgs.cores,
cmdLineArgs.problemSize,
cmdLineArgs.tile_len_x,
cmdLineArgs.tile_len_y);
*/
printf( "Time: %f", time );
}
// after T sweeps the most recently written plane is space[T & 1]
if( cmdLineArgs.verify ){
if(!verifyResultJacobi2D(space[cmdLineArgs.T & 1],cmdLineArgs.problemSize,
cmdLineArgs.globalSeed,cmdLineArgs.T )){
fprintf(stderr,"FAILURE\n");
}else{
fprintf(stderr,"SUCCESS\n");
}
}
}
|
GB_binop__lt_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__lt_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__lt_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int64)
// A*D function (colscale): GB (_AxD__lt_int64)
// D*A function (rowscale): GB (_DxB__lt_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int64)
// C=scalar+B GB (_bind1st__lt_int64)
// C=scalar+B' GB (_bind1st_tran__lt_int64)
// C=A+scalar GB (_bind2nd__lt_int64)
// C=A'+scalar GB (_bind2nd_tran__lt_int64)
// C type: bool
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij < bij)
// Kernel-configuration macros: these specialize the shared GraphBLAS
// templates (included by the functions below) for the LT operator on
// int64_t (cij = (aij < bij), result type bool).
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_INT64 || GxB_NO_LT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Compiled out (#if 0): LT is not one of the accumulable ops listed below,
// so no dense-accum ewise3 kernel is generated for it.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The loop itself lives in the shared template, specialized by the GB_*
// macros defined above; GrB_NO_VALUE selects the generic fallback.
GrB_Info GB (_Cdense_ewise3_noaccum__lt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The kernel body is compiled out (#if 0) for this operator, so this
// wrapper is a no-op that reports success.
GrB_Info GB (_Cdense_accumB__lt_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Same as accumB above: the body is compiled out, so this is a no-op.
GrB_Info GB (_Cdense_accumb__lt_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Applies cij = (aij < djj) column-by-column; traversal lives in the shared
// colscale template, parallelized over the A_ek_slicing task partition.
GrB_Info GB (_AxD__lt_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array (bool, the LT result type)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of _AxD above: cij = (dii < bij), traversal in the rowscale template.
GrB_Info GB (_DxB__lt_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array (bool, the LT result type)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Set-union ewise: the merge logic is in GB_add_template.c, driven by the
// precomputed TaskList; workspace declared here is released by GB_FREE_WORK.
GrB_Info GB (_AaddB__lt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Set-intersection ewise (method 01); traversal in GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__lt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// GB_BINOP_FLIP is 0 for LT (see the macro preamble), so only this
// branch is compiled for this kernel.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// The mask M drives the iteration (M_ek_slicing); see GB_emult_03_template.c.
GrB_Info GB (_AemultB_03__lt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for every entry present in B.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE),
// so the caller can use the generic fallback instead.
GrB_Info GB (_bind1st__lt_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
bool *Cx = (bool *) Cx_output ;
int64_t *Bx = (int64_t *) Bx_input ;
const int64_t x = (*((int64_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only touch positions present in the bitmap Bb
if (GBB (Bb, p))
{
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for every entry present in A.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_bind2nd__lt_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
bool *Cx = (bool *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
const int64_t y = (*((int64_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only touch positions present in the bitmap Ab
if (GBB (Ab, p))
{
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined here so the shared transpose template applies the
// bound-scalar operator instead of a cast.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated code restores GB_ATYPE for the kernels that follow
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
// Row-major destination specialization of the GEMM dispatcher: it never
// implements the product itself. Since C = A*B in row-major storage is the
// same memory as C^T = B^T * A^T in column-major, it swaps rows/cols,
// lhs/rhs (with their storage orders and conjugation flags flipped
// accordingly) and forwards to the column-major specialization.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
// note: lhs/rhs roles are swapped in the traits as well
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor,ResInnerStride>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
// Computes res += alpha * lhs * rhs with cache blocking (mc x kc panels of
// the lhs, kc x nc panels of the rhs) and the gebp micro-kernel.  Has two
// execution paths: an OpenMP-parallel one (info != 0) where threads share
// the packed lhs (blockA) via a hand-shaken users/sync protocol, and a
// sequential one using the classic three-loop Goto blocking.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper;
LhsMapper lhs(_lhs, lhsStride);
RhsMapper rhs(_rhs, rhsStride);
ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
// Each thread owns a row slice of the result ([lhs_start, lhs_start+lhs_length))
// and packs the corresponding slice of the lhs into the shared blockA,
// while each thread keeps a private packed rhs panel (blockB).
int tid = omp_get_thread_num();
int threads = omp_get_num_threads();
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users = threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
// Starting at (tid+shift) staggers the threads so they do not all
// contend for the same A'_i at once.
for(int shift=0; shift<threads; ++shift)
{
int i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
// (atomically when C++11 atomics are unavailable, via omp atomic).
for(Index i=0; i<threads; ++i)
#if !EIGEN_HAS_CXX11_ATOMIC
#pragma omp atomic
#endif
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
// The blocking object may already own buffers; otherwise allocate on
// the stack (or heap, depending on size) via the helper macro.
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
// If the whole rhs fits in one kc x nc panel, pack it only on the first
// i2 iteration and reuse it for every row panel of the lhs.
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// Functor binding the operands, destination, scale factor and blocking object
// of one GEMM call, so that parallelize_gemm (or a direct invocation) can
// evaluate arbitrary row/column sub-products through operator().
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
// Prepare the blocking object for a multi-threaded run: recompute block
// sizes for num_threads and pre-allocate the shared packed-lhs buffer.
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
// Compute dest.block(row,col,rows,cols) += alpha * lhs.block * rhs.block.
// cols==-1 is a sentinel meaning "all columns of the rhs".
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
// Base class holding the cache-blocking parameters (mc, nc, kc) and the
// optional pre-allocated packing buffers shared by all level-3 products.
// For row-major destinations the caller instantiates it with the scalar
// types already swapped (see the RowMajor specialization above).
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;  // packed lhs buffer (0 if not allocated yet)
RhsScalar* m_blockB;  // packed rhs buffer (0 if not allocated yet)
Index m_mc;           // block size along the M (rows) direction
Index m_nc;           // block size along the N (cols) direction
Index m_kc;           // block size along the K (depth) direction
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
};
// Blocking policy for problems whose maximal sizes are known at compile time:
// the packing buffers are member arrays (no heap allocation).  For a
// row-major destination the product is transposed, so rows/cols and the two
// scalar types are swapped relative to the user-visible matrices.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
// When the compiler honors the required alignment, use typed static arrays;
// otherwise over-allocate raw bytes and align the pointers manually below.
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
// No blocking: process the whole (compile-time bounded) problem at once.
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
// Round the raw buffers up to the next EIGEN_DEFAULT_ALIGN_BYTES boundary.
this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
// The static variant has nothing to (re)allocate; these are no-ops kept
// for interface compatibility with the dynamic specialization.
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
// Blocking policy for runtime-sized problems: block sizes are computed from
// cache parameters (computeProductBlockingSizes) and the packing buffers are
// heap-allocated lazily via allocateA/allocateB and freed in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA; // capacity (in scalars) of the packed lhs buffer
Index m_sizeB; // capacity (in scalars) of the packed rhs buffer
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
// For row-major destinations the product is transposed, so swap rows/cols.
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
// Keep m_nc untouched: pass a copy so only kc and mc are reduced.
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Recompute block sizes for a parallel run; must be called before any
// buffer has been allocated (see the assertion below).
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
// Keep m_mc untouched here: only kc and nc may be reduced.
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Lazy, idempotent allocations (no-ops once a buffer exists).
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
// aligned_delete is a no-op on null pointers, so unallocated buffers are fine.
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
// High-level entry point for "large" dense*dense products (GemmProduct):
// dispatches between the lazy coefficient-based product (small sizes),
// GEMV fallbacks (vector-shaped destinations), and the blocked GEMM above.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
// dst = lhs * rhs
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
// to determine the following heuristic.
// EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
// unless it has been specialized by the user or for a given architecture.
// Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
// I'm not sure it is still required.
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
// dst += lhs * rhs (same small-size heuristic as evalTo)
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
// dst -= lhs * rhs (implemented as scaleAndAddTo with alpha = -1)
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
// dst += alpha * a_lhs * a_rhs, the workhorse: strips transposes/conjugates
// and scalar factors from the operands, sets up the blocking object, and
// hands off to parallelize_gemm.
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
// Empty product: nothing to accumulate.
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
if (dst.cols() == 1)
{
// Fallback to GEMV if either the lhs or rhs is a runtime vector
typename Dest::ColXpr dst_vec(dst.col(0));
return internal::generic_product_impl<Lhs,typename Rhs::ConstColXpr,DenseShape,DenseShape,GemvProduct>
::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha);
}
else if (dst.rows() == 1)
{
// Fallback to GEMV if either the lhs or rhs is a runtime vector
typename Dest::RowXpr dst_vec(dst.row(0));
return internal::generic_product_impl<typename Lhs::ConstRowXpr,Rhs,DenseShape,DenseShape,GemvProduct>
::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha);
}
// Extract the underlying plain matrices and fold any nested scalar
// multiples into a single alpha.
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = combine_scalar_factors(alpha, a_lhs, a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,
Dest::InnerStrideAtCompileTime>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
// Only parallelize when the destination can be large enough to amortize it.
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
stresslet_real_rc.c | #include "stresslet_real_rc.h"
#ifdef BEENAKKER
#include "beenakker_op_fd.h"
#else
#error "Must provide -D<method> to compiler"
#endif
#define SWAP(x,y) { tmp=x;x=y;y=tmp; }
static void quicksort(int* restrict list, int* restrict slave, int m, int n);
static void build_cell_list(
// Input
const double* restrict x,
const int N,
const double* restrict box,
const double rc,
// Output
double* rn_p,
int ncell[3],
int* restrict *ll_p,
int* restrict *head_p
);
static void barrier(int bar_num, int *barrier_in, int *barrier_out, int *num_procs);
// ==== GENERATE TRIPLETS FOR MATRIX ASSEMBLY
//
// Build the sparse triplet representation (row, col, val[i][j]) of the
// real-space, truncated (cutoff rc) stresslet interaction matrix for N
// points with positions x and normal vectors nvec in a periodic box.
// x and nvec are laid out as N-column-major triplets: component k of point
// idx is x[idx + k*N].  Only the upper triangle i<=j of the 3x3 tensor per
// pair is stored in val.  For each interacting pair (s,t) with t>s, two
// triplets are appended: (t,s,A1) and (s,t,A2).
// On return: *row_p/*col_p hold the (sorted) triplet indices, *numel_p the
// triplet count, *buck_size_p the per-column counts and *idx_in_array_p the
// permutation applied during sorting (see sorting section below).
// If nlhs==1 the column list is freed and *col_p points to freed memory —
// NOTE(review): callers are presumably expected not to read it then; verify.
void get_rs_triplets (const double* restrict x, const double* restrict nvec, const int N,
const double* restrict box, const double xi, const double rc, const int nlhs,
int* restrict *row_p, int* restrict *col_p, double* restrict val[3][3],
int* restrict *buck_size_p, int* restrict *idx_in_array_p, int* numel_p
)
{
// Setup output variables
int* restrict row;
int* restrict col;
int* restrict idx_in_array;
int* restrict buck_size;
// Setup variables
int i,j;
int ncell[3];
int* restrict ll;
int* restrict head;
double rn;
// Offsets of the 27 neighbour cells (home cell included) in x/y/z.
int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
struct timeval tic, toc;
gettimeofday(&tic, NULL);
double time_spent;
// Build cell list
build_cell_list(x, N, box, rc, &rn, ncell, &ll, &head);
if(VERBOSE)
{
__PRINTF("[RSRC] SPARSE MATRIX\n");
__PRINTF("[RSRC] %s, xi=%g\n", OP_TAG, xi);
__PRINTF("[RSRC] rc=%.3f, rn=%.3f\n", rc, rn);
__PRINTF("[RSRC] box=(%g,%g,%g), ncell=(%d,%d,%d)\n",
box[0],box[1],box[2],
ncell[0],ncell[1],ncell[2]);
}
//============================================================
// CALCULATE INTERACTIONS
//
// For all vectors, go through neighbors and save interactions
// in vectors that are used to create a sparse matrix
// Allocate a guess based on average density +50%
// (expected pairs within radius rc for a uniform distribution)
int maxel = round( 1.5 * N*N*4*PI*rc*rc*rc/3/(box[0]*box[1]*box[2]) );
int numel = 0;
row = __MALLOC(maxel*sizeof(int));
col = __MALLOC(maxel*sizeof(int));
for(i=0;i<=2;i++)
for(j=i;j<=2;j++)
{
val[i][j] = __MALLOC(maxel*sizeof(double));
}
#ifdef _OPENMP
// State for the home-brewed barrier used during reallocation.  A regular
// "#pragma omp barrier" cannot be used because threads leave the nowait
// work-shared loop at different times.
int barrier_in[2] = {0,0};
int barrier_out[2] = {0,0};
int realloc_done=0;
int num_procs;
#pragma omp parallel private(i,j) \
shared(numel,maxel,row,col,val,box,x,nvec,head,ll,px,py,pz,ncell,rn,barrier_in,barrier_out,realloc_done,num_procs) \
default(none)
#endif
{ // Begin parallel section
int head_idx;
int icell[3], home_cell[3];
int idx_s,idx_t,ip;
double rsq;
double pshift[3], xs[3], ns[3], nt[3], xr[3];
double A1[3][3], A2[3][3];
const double rcsq = rc*rc;
// Allocate a bufffer of interactions to be written
// into triplet list
const int buf_size = 256;
int buf_cnt = 0;
int idx_buf, next_idx_t;
int* restrict buf_idx_t;
double* restrict buf_xr;
double* restrict buf_rsq;
double* restrict C;
double* restrict D;
int tnum = 0;
#ifdef _OPENMP
tnum = omp_get_thread_num();
#pragma omp single
num_procs = omp_get_num_threads();
if(VERBOSE)
{
#pragma omp master
__PRINTF("[RSRC] Running on %d threads.\n",num_procs);
}
// Seems mxMalloc/mxFree are not thread safe
#pragma omp critical
{
#endif
buf_idx_t = __MALLOC(buf_size*sizeof(int));
buf_xr = __MALLOC(3*buf_size*sizeof(double));
buf_rsq = __MALLOC(buf_size*sizeof(double));
C = __MALLOC(buf_size*sizeof(double));
D = __MALLOC(buf_size*sizeof(double));
#ifdef _OPENMP
}
#pragma omp for schedule(dynamic) nowait
#endif
// Loop over all points
for(idx_s=0;idx_s<N;idx_s++)
{
// Source point
xs[0] = x[idx_s ];
xs[1] = x[idx_s+N ];
xs[2] = x[idx_s+2*N];
// Source point normal vector
ns[0] = nvec[idx_s ];
ns[1] = nvec[idx_s+N ];
ns[2] = nvec[idx_s+2*N];
// Determine home cell
for(j=0; j<3; j++)
home_cell[j] = floor( xs[j]/rn );
// Iterate through near cells (including home cell)
for(ip=0; ip<27; ip++)
{
// Get neigh cell
icell[0] = home_cell[0] + px[ip];
icell[1] = home_cell[1] + py[ip];
icell[2] = home_cell[2] + pz[ip];
// Periodic wrap
for(j=0; j<3; j++)
{
// (Could do this with mod)
// pshift records the periodic image shift to apply to the
// target coordinates so distances are measured consistently.
pshift[j] = 0;
if(icell[j] >= ncell[j])
{
icell[j] = 0;
pshift[j] = box[j];
}
else if(icell[j]<0)
{
icell[j] = ncell[j]-1;
pshift[j] = -box[j];
}
}
head_idx =
icell[0] +
icell[1]*ncell[0] +
icell[2]*ncell[1]*ncell[0];
// Go through cell list
idx_t = head[head_idx];
while(1)
{
// Only t>s pairs are computed; the symmetric counterpart
// A2 is produced by op_A_symm_CD below.
if(idx_t > idx_s)
{
// r points from s to t
for(j=0; j<3; j++)
xr[j] = x[idx_t+j*N] + pshift[j] - xs[j];
// Check if we are within truncation radius
rsq = xr[0]*xr[0] + xr[1]*xr[1] + xr[2]*xr[2];
if(rsq <= rcsq)
{
// Yes, so put interaction in buffer
buf_idx_t[buf_cnt] = idx_t;
buf_rsq[buf_cnt] = rsq;
for(i=0;i<3;i++)
buf_xr[3*buf_cnt+i] = xr[i];
buf_cnt++;
}
}
// Save location of next point in cell chain
if(idx_t == -1)
next_idx_t = -1;
else
next_idx_t = ll[idx_t];
// Empty buffer if last point of last neighbour,
// or buffer full
if ( (ip==26 && next_idx_t==-1) || buf_cnt==buf_size)
{
// Check if we have enough space to hold buffer contents
int idx_write, can_write;
#ifdef _OPENMP
#pragma omp critical
#endif
{ /* begin critical section */
// Check if buffer holds writing space for me
// Each buffered pair produces two triplets, hence 2*buf_cnt.
if(maxel-numel <= 2*buf_cnt) {
can_write = 0;
//__PRINTF("[%d] Can't write, reallocation needed! \n",tnum);
}
else
can_write = 1;
// Reserve writing in either case
idx_write = numel;
numel += 2*buf_cnt;
} /* end critical section */
/* Begin can_write==0 */
// Out of space: all threads rendezvous at the custom barrier and
// exactly one of them (the first through the critical section)
// reallocates the shared triplet arrays.
if(can_write==0)
{
int alloc_add = buf_size; // How much to add to allocation (single thread)
#ifdef _OPENMP
// Everybody has to wait here before reallocation
// Allocate more than a fuller buffer for every thread
alloc_add = num_procs*buf_size;
#pragma omp critical
realloc_done = 0; // Everybody agrees reallocation has not been done
barrier(0, barrier_in, barrier_out, &num_procs);
#pragma omp critical
{ // Critical section
if(realloc_done==0)
{
realloc_done=1;
#endif
// Allocate for full buffer(s) + 20% more
int new_maxel = ceil(1.2*(maxel+alloc_add));
if (VERBOSE)
__PRINTF("[RSRC][%d] Reallocating triplet vectors %d -> %d\n",tnum,maxel,new_maxel);
maxel = new_maxel;
row = __REALLOC(row, maxel*sizeof(int));
col = __REALLOC(col, maxel*sizeof(int));
for(i=0;i<=2;i++)
for(j=i;j<=2;j++)
val[i][j] = __REALLOC(val[i][j], maxel*sizeof(double));
#ifdef _OPENMP
//__PRINTF("[%d] Done \n",tnum);
}
else
{
//__PRINTF("[%d] Someone else reallocated \n",tnum);
}
}
barrier(1, barrier_in, barrier_out, &num_procs);
#endif
}
/* End can_write==0 */
// Do delayed calculations
// (distance-dependent scalar factors for the whole buffer at once)
op_A_CD(C,D,buf_rsq,buf_cnt,xi);
//#pragma omp critical
//__PRINTF("[%d] Begin write \n",tnum);
// Write triplets
for(idx_buf=0;idx_buf<buf_cnt;idx_buf++)
{
idx_t = buf_idx_t[idx_buf];
for(i=0;i<3;i++)
xr[i] = buf_xr[3*idx_buf+i];
// Source point normal vector
nt[0] = nvec[idx_t ];
nt[1] = nvec[idx_t+N ];
nt[2] = nvec[idx_t+2*N];
// Calculate interactions t->s and s<-t
op_A_symm_CD(A1,A2,xr,ns,nt,xi,C[idx_buf],D[idx_buf]);
// Append results to row,col,val vectors
// (upper triangle i<=j only; A1/A2 presumably symmetric — see op)
row[idx_write] = idx_t;
col[idx_write] = idx_s;
for(i=0; i<=2; i++)
for(j=i; j<=2; j++)
{
val[i][j][idx_write] = A1[i][j];
}
idx_write++;
row[idx_write] = idx_s;
col[idx_write] = idx_t;
for(i=0; i<=2; i++)
for(j=i; j<=2; j++)
{
val[i][j][idx_write] = A2[i][j];
}
idx_write++;
} // endfor buffer
//#pragma omp critical
//__PRINTF("[%d] End write \n",tnum);
buf_cnt = 0;
} // endif chainend or buffull
idx_t = next_idx_t;
if(idx_t == -1)
break; // Chain ended
} // End of neighbours in this cell
} // End of cells
} // End of particles
#ifdef _OPENMP
#pragma omp critical
{
//__PRINTF("[%d] Exit loop , barrier_in={%d,%d}\n",tnum, barrier_in[0], barrier_in[1]);
#pragma omp atomic
// One less thread going around in loop
num_procs--;
}
#pragma omp critical
#endif
{
__FREE(buf_idx_t);
__FREE(buf_xr);
__FREE(buf_rsq);
__FREE(C);
__FREE(D);
}
} // End parallel section
__FREE(head);
__FREE(ll);
gettimeofday(&toc, NULL);
time_spent = DELTA(tic,toc);
if(VERBOSE)
{
__PRINTF("[RSRC] Triplets generated in %.3f seconds.\n", time_spent);
}
//============================================
// SORT RESULTS WITH COUNTING + QUICK SORT
// Counting sort on columns, then quicksort on rows
// in each column
// (Turns out this is counting sort rather than bucket sort,
// which I initially thought, hence the buck_* naming.)
gettimeofday(&tic, NULL);
buck_size = __MALLOC(N*sizeof(int));
idx_in_array = __MALLOC(numel*sizeof(int));
int* restrict buck_count = __MALLOC(N*sizeof(int));
int* restrict buck_pos = __MALLOC(N*sizeof(int));
int buck_idx,new_idx;
// Init lists
for(i=0;i<N;i++)
{
buck_size[i]=0;
buck_count[i]=0;
}
// Count number of elements in each bucket (column)
for(i=0;i<numel;i++)
{
buck_idx = col[i];
buck_size[buck_idx]++;
}
// Cumulative addition to get locations of each bucket after sort,
// + save largest bucket size for later.
buck_pos[0] = 0;
for(i=1;i<N;i++)
{
buck_pos[i] = buck_pos[i-1]+buck_size[i-1];
}
// Assign each element to a bucket, store permutations in idx_in_array
int* restrict rowtmp = __MALLOC(numel*sizeof(int));
for(i=0;i<numel;i++)
{
buck_idx = col[i];
new_idx = buck_pos[buck_idx] + buck_count[buck_idx];
idx_in_array[ new_idx ] = i;
buck_count[buck_idx]++;
}
__FREE(buck_count); // Free counter
// Sort rows using permutations
// (work-shared)
#ifdef _OPENMP
#pragma omp parallel for default(shared)
#endif
for(i=0;i<numel;i++)
rowtmp[i] = row[ idx_in_array[i] ];
__FREE(row);
row = rowtmp;
if(nlhs==1)
{
__FREE(col); // Free column list if only returning matrix,
}
else
{
// else sort columns too.
// Could be done faster with bucket info, but sorted columns are
// not needed for real application.
int* restrict coltmp = __MALLOC(numel*sizeof(int));
for(i=0;i<numel;i++)
coltmp[i] = col[ idx_in_array[i] ];
__FREE(col);
col = coltmp;
}
gettimeofday(&toc,NULL);
time_spent = DELTA(tic,toc);
if(VERBOSE)
__PRINTF("[RSRC] Counting sort of cols finished in %.3f seconds.\n", time_spent);
gettimeofday(&tic,NULL);
// Quicksort on buckets
// Each bucket contains a compressed column.
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) default(none) shared(buck_pos,buck_size,idx_in_array,row)
#endif
for(buck_idx=0;buck_idx<N;buck_idx++)
{
int begin = buck_pos[buck_idx];
int size = buck_size[buck_idx];
quicksort(row, idx_in_array, begin, begin+size-1) ;
}
__FREE(buck_pos); // Free bucket list
gettimeofday(&toc,NULL);
time_spent = DELTA(tic,toc);
if(VERBOSE)
__PRINTF("[RSRC] Quicksort of rows finished in %.3f seconds.\n", time_spent);
// Set return pointers
*row_p = row;
*col_p = col;
*buck_size_p = buck_size;
*idx_in_array_p = idx_in_array;
*numel_p = numel;
}
// ==== BUILD CELL LIST
//
// Partition the N points in x (component k of point i at x[i+N*k]) into a
// uniform grid of cubic-ish cells of side rn >= rc, and build the classic
// linked cell list: head[c] is the first point in cell c (or -1), and
// ll[i] chains to the next point in the same cell (or -1).
// Outputs: *rn_p = actual cell side, ncell[3] = cells per dimension,
// *ll_p / *head_p = newly allocated lists (caller frees with __FREE).
//
// TODO: Add some assertions to make sure rc not too big,
// and that box can be divided into square cells.
static void build_cell_list(
// Input
const double* restrict x,
const int N,
const double* restrict box,
const double rc,
// Output
double* rn_p,
int ncell[3],
int* restrict *ll_p,
int* restrict *head_p
)
{
int i,j;
int head_idx, ncell_tot;
int icell[3];
int* restrict ll;
int* restrict head;
double boxmin, rn;
// Setup cell partitioning: rn is the smallest cell side >= rc that
// divides the shortest box edge evenly.
boxmin = box[0];
if(box[1]<boxmin)
boxmin = box[1];
if (box[2]<boxmin)
boxmin = box[2];
rn = boxmin / floor(boxmin/rc);
for(i=0;i<3;i++)
ncell[i] = round( box[i]/rn );
ncell_tot = ncell[0]*ncell[1]*ncell[2];
// Prepare cell list
ll = __MALLOC(N*sizeof(int));
head = __MALLOC(ncell_tot*sizeof(int));
for(i=0; i<ncell_tot; i++)
head[i] = -1;
// Do cell partitioning
for(i=0; i<N; i++)
{
for(j=0; j<3; j++)
{
icell[j] = floor( x[i+N*j]/rn );
// BUGFIX: clamp to the valid cell range. A point lying exactly on
// the upper box boundary gives floor(box[j]/rn) == ncell[j], which
// previously indexed head[] out of bounds (undefined behavior);
// likewise a marginally negative coordinate gave icell[j] == -1.
if(icell[j] >= ncell[j])
icell[j] = ncell[j]-1;
else if(icell[j] < 0)
icell[j] = 0;
}
head_idx =
icell[0] +
icell[1]*ncell[0] +
icell[2]*ncell[1]*ncell[0];
// Push point i onto the front of its cell's chain
ll[i] = head[head_idx];
head[head_idx] = i;
}
*rn_p = rn;
*ll_p = ll;
*head_p = head;
}
//============ QUICKSORT ROUTINE
// Applies quicksort on an interval (m,n) of *list,
// performs the same permutations on *slave.
// Uses a private stack instead of making recursive calls.
// SWAP is the file-level macro defined near the top of this file; it uses
// the local 'tmp' variable declared below.
static void quicksort(int* restrict list, int* restrict slave, int m, int n) {
#define MAX_LEVELS 64
int beg[MAX_LEVELS], end[MAX_LEVELS]; // Stack
int key,i,j,k,s,tmp;
s=0;
beg[0]=m;
end[0]=n;
while (s>=0)
{ // While work in stack, pop
m=beg[s];
n=end[s];
if (m<n)
{
k = m+(n-m)/2; // Choose middle for pivot
SWAP(list[m],list[k]); // Swap out pivot
SWAP(slave[m],slave[k]);
// Do quicksort
// Hoare-style partition: i scans right past elements <= key,
// j scans left past elements > key; swap when they cross order.
key = list[m];
i = m+1;
j = n;
while(i <= j)
{
while((i <= n) && (list[i] <= key))
i++;
while((j >= m) && (list[j] > key))
j--;
if( i < j)
{
SWAP(list[i],list[j]);
SWAP(slave[i],slave[j]);
}
}
// Swap in pivot at right place
SWAP(list[m],list[j]);
SWAP(slave[m],slave[j]);
// With 64 levels and shortest-interval-first scheduling (below),
// overflow should not occur in practice; bail out defensively.
if(s == MAX_LEVELS-1) // Stack full
{
__PRINTF("ERROR. Quicksort reached MAX_LEVELS\n");
return;
}
// Recursively sort the lesser list
beg[s] = m;
end[s] = j-1;
beg[s+1]=j+1;
end[s+1]=n;
s += 1;
// Do shortest interval first to limit stack use
if (end[s]-beg[s]>end[s-1]-beg[s-1])
{
SWAP(beg[s],beg[s-1]);
SWAP(end[s],end[s-1]);
}
}
else
{
s--;
}
}
}
//============ Home-brewed barrier
// Spin barrier for a dynamically shrinking thread count: *num_procs is
// decremented by threads that leave the work loop early, so a standard
// "#pragma omp barrier" (which waits for ALL team members) cannot be used.
// bar_num selects one of two independent barrier slots so two consecutive
// barriers do not interfere with each other's reset.
// NOTE(review): the spin reads barrier_in[bar_num] and *num_procs outside
// any critical section, relying only on "#pragma omp flush" for visibility.
// This is the classic pre-atomics OpenMP idiom but is formally a data race;
// modern code would use atomic reads — confirm before reusing elsewhere.
static void barrier(int bar_num, int *barrier_in, int *barrier_out, int *num_procs)
{
#ifdef _OPENMP
int tnum = omp_get_thread_num();
// Barrrier arrive
#pragma omp critical
{
barrier_in[bar_num]++; // Announce you arrived at barrier
//__PRINTF("[%d] Reached barrier %d (%d,%d) \n", tnum, bar_num, barrier_in[bar_num], *num_procs);
}
// Barrier spin
while(barrier_in[bar_num] < *num_procs) {
#pragma omp flush
};
// Barrier depart
#pragma omp critical
{
barrier_out[bar_num]++; // Anounce you passed barrier
//__PRINTF("[%d] Passed barrier %d (%d,%d) \n", tnum, bar_num, barrier_out[bar_num], *num_procs);
}
// Barrier reset
// The last thread through resets both counters so this slot can be reused.
#pragma omp critical
{
if (barrier_out[bar_num] >= *num_procs)
{
//__PRINTF("[%d] Everybody passed barrier %d. \n",tnum, bar_num);
barrier_in[bar_num] = 0;
barrier_out[bar_num] = 0;
}
}
#endif
}
// ******************************** compute_rsrc_direct ******************
// ***********************************************************************
// ==== Compute result directly
// Do not build sparse matrix
// Matrix-free evaluation of the truncated real-space sum: for every
// pair of points closer than rc (with periodic wrap over `box`), the
// symmetric pair operator is applied and accumulated into phi.
// Inputs x/nvec/fvec are in SoA layout (v[i + j*N], j = 0..2); *phi_p
// receives a freshly __MALLOC'ed 3*N array in the same layout (caller
// frees).  xi is the split parameter forwarded to the op_A_* kernels.
// Parallelized over source points; each thread accumulates into a
// private array that is reduced into phi_out with atomic adds.
void compute_rsrc_direct (const double* restrict x,
const double* restrict nvec,
const double* restrict fvec,
const int N,
const double* restrict box,
const double xi,
const double rc,
double* restrict *phi_p
)
{
// Setup output
double* restrict phi_out = __MALLOC(3*N*sizeof(double));
for(int i=0;i<3*N;i++)
phi_out[i] = 0.0;
// Setup variables
int ncell[3];
int* restrict ll;
int* restrict head;
double rn;
// Relative offsets of the 27 neighbour cells (home cell included)
int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
struct timeval tic, toc;
gettimeofday(&tic, NULL);
double time_spent;
// Build cell list (classic head/linked-list representation)
build_cell_list(x, N, box, rc, &rn, ncell, &ll, &head);
gettimeofday(&toc, NULL);
time_spent = DELTA(tic,toc);
if(VERBOSE)
{
__PRINTF("[RSRC] Cell list built in %.3f seconds.\n", time_spent);
}
if(VERBOSE)
{
__PRINTF("[RSRC] MATRIX-FREE\n");
__PRINTF("[RSRC] %s, xi=%g\n", OP_TAG, xi);
__PRINTF("[RSRC] rc=%.3f, rn=%.3f\n", rc, rn);
__PRINTF("[RSRC] box=(%g,%g,%g), ncell=(%d,%d,%d)\n",
box[0],box[1],box[2],
ncell[0],ncell[1],ncell[2]);
}
gettimeofday(&tic, NULL);
#ifdef _OPENMP
#pragma omp parallel shared(phi_out,box,x,nvec,fvec,head,ll,px,py,pz,ncell,rn) default(none)
#endif
{ // Begin parallel section
// Setup local output (thread-private accumulator)
double* restrict phi = __MALLOC(3*N*sizeof(double));
for(int i=0;i<3*N;i++)
phi[i] = 0.0;
int i,j;
int head_idx;
int icell[3], home_cell[3];
int idx_s,idx_t,ip;
double rsq;
double pshift[3], xs[3], ns[3], fs[3], nt[3], ft[3], xr[3];
// NOTE(review): A1/A2 appear unused in this routine.
double A1[3][3], A2[3][3];
const double rcsq = rc*rc;
// Allocate a buffer of interactions to be written
// into triplet list, so op_A_CD can batch the distance-dependent work
const int buf_size = 256;
int buf_cnt = 0;
int idx_buf, next_idx_t;
int buf_idx_t[buf_size];
double buf_xr[3*buf_size];
double buf_rsq[buf_size];
double C[buf_size];
double D[buf_size];
int num_procs = 1;
#ifdef _OPENMP
num_procs = omp_get_num_threads();
if(VERBOSE)
{
#pragma omp master
__PRINTF("[RSRC] Running on %d threads.\n",num_procs);
}
#pragma omp for schedule(dynamic) nowait
#endif
// Loop over all points
for(idx_s=0;idx_s<N;idx_s++)
{
double phi_idx_s[3] = {0.0, 0.0, 0.0};
// Source point
xs[0] = x[idx_s ];
xs[1] = x[idx_s+N ];
xs[2] = x[idx_s+2*N];
// Source point normal vector
ns[0] = nvec[idx_s ];
ns[1] = nvec[idx_s+N ];
ns[2] = nvec[idx_s+2*N];
// Source point distribution density
fs[0] = fvec[idx_s ];
fs[1] = fvec[idx_s+N ];
fs[2] = fvec[idx_s+2*N];
// Determine home cell
for(j=0; j<3; j++)
home_cell[j] = floor( xs[j]/rn );
// Iterate through near cells (including home cell)
for(ip=0; ip<27; ip++)
{
// Get neigh cell
icell[0] = home_cell[0] + px[ip];
icell[1] = home_cell[1] + py[ip];
icell[2] = home_cell[2] + pz[ip];
// Periodic wrap
for(j=0; j<3; j++)
{
// (Could do this with mod)
pshift[j] = 0;
if(icell[j] >= ncell[j])
{
icell[j] = 0;
pshift[j] = box[j];
}
else if(icell[j]<0)
{
icell[j] = ncell[j]-1;
pshift[j] = -box[j];
}
}
head_idx =
icell[0] +
icell[1]*ncell[0] +
icell[2]*ncell[1]*ncell[0];
// Go through cell list
idx_t = head[head_idx];
while(1)
{
// Each unordered pair is handled once; the symmetric kernel
// below updates both endpoints.
if(idx_t > idx_s)
{
// r points from s to t
for(j=0; j<3; j++)
xr[j] = x[idx_t+j*N] + pshift[j] - xs[j];
// Check if we are within truncation radius
rsq = xr[0]*xr[0] + xr[1]*xr[1] + xr[2]*xr[2];
if(rsq <= rcsq)
{
// Yes, so put interaction in buffer
buf_idx_t[buf_cnt] = idx_t;
buf_rsq[buf_cnt] = rsq;
for(i=0;i<3;i++)
buf_xr[3*buf_cnt+i] = xr[i];
buf_cnt++;
}
}
// Save location of next point in cell chain
if(idx_t == -1)
next_idx_t = -1;
else
next_idx_t = ll[idx_t];
// Empty buffer if last point of last neighbour,
// or buffer full.  (An empty chain still reaches this check
// with next_idx_t == -1, so the buffer is always drained
// before moving on to the next source point.)
if ( (ip==26 && next_idx_t==-1) || buf_cnt==buf_size)
{
// Do delayed calculations
op_A_CD(C,D,buf_rsq,buf_cnt,xi);
// Save interactions
for(idx_buf=0;idx_buf<buf_cnt;idx_buf++)
{
idx_t = buf_idx_t[idx_buf];
for(i=0;i<3;i++)
xr[i] = buf_xr[3*idx_buf+i];
// Target point normal vector
nt[0] = nvec[idx_t ];
nt[1] = nvec[idx_t+N ];
nt[2] = nvec[idx_t+2*N];
// Target point distribution density
ft[0] = fvec[idx_t ];
ft[1] = fvec[idx_t+N ];
ft[2] = fvec[idx_t+2*N];
// Calculate interactions t->s and s<-t
double phi_idx_t[3] = {0.0,0.0,0.0};
op_A_comp_symm_CD(xr,phi_idx_s,phi_idx_t,ns,nt,fs,ft,xi,C[idx_buf],D[idx_buf]);
for(i=0; i<3; i++)
phi[idx_t+N*i] += phi_idx_t[i];
} // endfor buffer
buf_cnt = 0;
} // endif chainend or buffull
idx_t = next_idx_t;
if(idx_t == -1)
break; // Chain ended
} // End of neighbours in this cell
} // End of cells
// Save additions to point s
phi[idx_s ] += phi_idx_s[0];
phi[idx_s+N ] += phi_idx_s[1];
phi[idx_s+2*N] += phi_idx_s[2];
} // End of particles
#ifdef _OPENMP
// Yes, this reduction is probably crap HPC-wise,
// but it works well on my quad core right now.
struct timeval tic_red, toc_red;
#pragma omp master
gettimeofday(&tic_red, NULL);
for(i=0; i<3*N; i++)
{
#pragma omp atomic
phi_out[i] += phi[i];
}
#pragma omp master
{
gettimeofday(&toc_red, NULL);
double time_spent = DELTA(tic_red,toc_red);
if(VERBOSE)
__PRINTF("[RSRC] Reduction took %.3f seconds.\n", time_spent);
}
// free/malloc not thread safe under MEX
#pragma omp critical
__FREE(phi);
#else
// Serial build: hand the private accumulator straight to the output.
__FREE(phi_out);
phi_out = phi;
#endif
} // End parallel section
gettimeofday(&toc, NULL);
time_spent = DELTA(tic,toc);
if(VERBOSE)
{
__PRINTF("[RSRC] phi computed in %.3f seconds.\n", time_spent);
}
*phi_p = phi_out;
}
// ******************************** compute_rsrc_direct ******************
// ***********************************************************************
// ==== BUILD CELL LIST (NEW)
//
// Partition the periodic box into cubic cells of side rn >= rc and
// bucket the N points into them, CSR-style:
//   cell_list holds the point indices grouped by cell, and
//   cell_idx[c] .. cell_idx[c+1]-1 are the positions of cell c's points.
// The caller owns (and must free) *cell_list_p and *cell_idx_p.
//
// TODO: Add some assertions to make sure rc not too big,
// and that box can be divided into square cells.
static void build_cell_list_new(
    // Input
    const double* restrict x,   // point coordinates, AoS layout x[3*i+j]
    const int N,                // number of points
    const double* restrict box, // box side lengths
    const double rc,            // truncation radius; cell side will be >= rc
    // Output
    double* rn_p,               // actual cell side used
    int ncell[3],               // number of cells per dimension
    int* restrict *cell_list_p, // point indices grouped by cell (length N)
    int* restrict *cell_idx_p   // CSR offsets (length ncell_tot+1)
    )
{
    int i,j;
    int ncell_tot;
    int icell[3];
    double boxmin, rn;
    // Outputs
    int* restrict cell_list;
    int* restrict cell_idx;
    // Intermediates (could do this with fewer vars, but this is clear)
    int* restrict cell_count;
    int* restrict points_in_cell;
    int* restrict point_cell_map;
    // Setup cell partitioning: grow rc so cells tile the smallest box
    // dimension exactly.
    boxmin = box[0];
    if(box[1]<boxmin)
        boxmin = box[1];
    if (box[2]<boxmin)
        boxmin = box[2];
    rn = boxmin / floor(boxmin/rc);
    for(i=0;i<3;i++)
        ncell[i] = round( box[i]/rn );
    ncell_tot = ncell[0]*ncell[1]*ncell[2];
    // Prepare arrays
    cell_list = __MALLOC(N*sizeof(int));
    cell_idx = __MALLOC((ncell_tot+1)*sizeof(int));
    point_cell_map = __MALLOC(N*sizeof(int));
    points_in_cell = __MALLOC(ncell_tot*sizeof(int));
    for(i=0; i<ncell_tot; i++)
        points_in_cell[i] = 0;
    // Sweep 1: count points per cell, remember each point's cell.
    for(i=0; i<N; i++)
    {
        for(j=0; j<3; j++)
        {
            icell[j] = x[i*3+j]/rn;
            // BUGFIX: clamp into [0, ncell[j]-1].  A coordinate exactly
            // on (or marginally past) the box boundary previously gave
            // icell[j] == ncell[j] and indexed out of bounds below.
            if(icell[j] >= ncell[j])
                icell[j] = ncell[j]-1;
            if(icell[j] < 0)
                icell[j] = 0;
        }
        int icell_idx =
            icell[0] +
            icell[1]*ncell[0] +
            icell[2]*ncell[1]*ncell[0];
        points_in_cell[icell_idx]++;
        point_cell_map[i] = icell_idx;
    }
    // Generate addressing (prefix sum -> CSR offsets)
    cell_idx[0]=0;
    for (int k=0; k<ncell_tot; k++)
        cell_idx[k+1] = cell_idx[k]+points_in_cell[k];
    // Setup new vector
    __FREE(points_in_cell);
    cell_count = __MALLOC(ncell_tot*sizeof(int));
    for(i=0; i<ncell_tot; i++)
        cell_count[i] = 0;
    // Sweep 2: scatter point indices into their cells.
    for(i=0; i<N; i++)
    {
        int icell_idx = point_cell_map[i];
        int adr = cell_idx[icell_idx] + cell_count[icell_idx];
        cell_list[adr] = i;
        cell_count[icell_idx]++;
    }
    __FREE(cell_count);
    __FREE(point_cell_map);
    *rn_p = rn;
    *cell_list_p = cell_list;
    *cell_idx_p = cell_idx;
}
// Transpose vector
// Convert an N-point vector from SoA layout (in[i + j*N], j = 0..2)
// to AoS layout (out[3*i + j]).  `in` and `out` must not alias.
void transpose(const double* restrict in, double* restrict out, const int N)
{
    const double *col0 = in;
    const double *col1 = in + N;
    const double *col2 = in + 2*N;
    for(int k = 0; k < N; k++)
    {
        out[3*k]     = col0[k];
        out[3*k + 1] = col1[k];
        out[3*k + 2] = col2[k];
    }
}
// ==== Compute result directly
// Do not build sparse matrix
// Matrix-free evaluation of the truncated real-space sum (CSR cell-list
// version).  Inputs arrive in legacy SoA layout (v[i + j*N], j = 0..2);
// they are transposed to AoS internally for locality, and the result is
// transposed back before return.  *phi_p receives a freshly __MALLOC'ed
// 3*N array in SoA layout (caller frees).  xi is the split parameter
// forwarded to the op_A_* kernels.
void compute_rsrc_direct_new (const double* restrict x_in,
                              const double* restrict nvec_in,
                              const double* restrict fvec_in,
                              const int N,
                              const double* restrict box,
                              const double xi,
                              const double rc,
                              double* restrict *phi_p
                              )
{
    struct timeval tic, toc;
    gettimeofday(&tic, NULL);
    double time_spent;
    // Fix input (legacy format gives bad memory access)
    double* restrict x = __MALLOC(3*N*sizeof(double));
    double* restrict nvec = __MALLOC(3*N*sizeof(double));
    double* restrict fvec = __MALLOC(3*N*sizeof(double));
    transpose(x_in, x, N);
    transpose(fvec_in, fvec, N);
    transpose(nvec_in, nvec, N);
    gettimeofday(&toc, NULL);
    double time_tr = DELTA(tic,toc);
    // Setup output
    double* restrict phi_out = __MALLOC(3*N*sizeof(double));
    for(int i=0;i<3*N;i++)
        phi_out[i] = 0.0;
    // Setup variables
    int ncell[3];
    int* restrict cell_list;
    int* restrict cell_idx;
    double rn;
    // Relative offsets of the 27 neighbour cells (home cell included)
    int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
    int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
    int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
    // Build cell list
    gettimeofday(&tic, NULL);
    build_cell_list_new(x, N, box, rc, &rn, ncell, &cell_list, &cell_idx);
    gettimeofday(&toc, NULL);
    time_spent = DELTA(tic,toc);
    if(VERBOSE)
    {
        __PRINTF("[RSRC] MATRIX-FREE (NEW)\n");
        __PRINTF("[RSRC] %s, xi=%g\n", OP_TAG, xi);
        __PRINTF("[RSRC] rc=%.3f, rn=%.3f\n", rc, rn);
        __PRINTF("[RSRC] box=(%g,%g,%g), ncell=(%d,%d,%d)\n",
                 box[0],box[1],box[2],
                 ncell[0],ncell[1],ncell[2]);
        __PRINTF("[RSRC] Cell list built in %.3f seconds.\n", time_spent);
    }
    gettimeofday(&tic, NULL);
#ifdef _OPENMP
#pragma omp parallel \
    shared(phi_out,box,x,nvec,fvec,cell_list,cell_idx, \
           px,py,pz,ncell,rn) \
    default(none)
#endif
    { // Begin parallel section
        // Thread-private accumulator, reduced into phi_out at the end.
        double* restrict phi = __MALLOC(3*N*sizeof(double));
        for(int i=0;i<3*N;i++)
            phi[i] = 0.0;
        int i,j;
        int icell_idx;
        int icell[3], home_cell[3];
        int idx_s,idx_t,ip;
        double rsq;
        double pshift[3], xs[3], ns[3], fs[3], nt[3], ft[3], xr[3];
        const double rcsq = rc*rc;
        // Buffer of pending interactions so the distance-dependent
        // factors C/D can be computed in batched sweeps (op_A_CD).
        const int buf_size = 256;
        int buf_cnt = 0;
        int idx_buf;
        int buf_idx_t[buf_size];
        double buf_xr[3*buf_size];
        double buf_rsq[buf_size];
        double C[buf_size];
        double D[buf_size];
        int num_procs = 1;
#ifdef _OPENMP
        num_procs = omp_get_num_threads();
        if(VERBOSE)
        {
#pragma omp master
            __PRINTF("[RSRC] Running on %d threads.\n",num_procs);
        }
#pragma omp for schedule(dynamic) nowait
#endif
        // Loop over all points (work-shared)
        for(idx_s=0;idx_s<N;idx_s++)
        {
            double phi_idx_s[3] = {0.0, 0.0, 0.0};
            // Source point position, normal vector and density.
            // (The position local was previously named `xi`, shadowing
            // the function parameter; renamed for clarity.)
            for(i=0; i<3; i++)
            {
                xs[i] = x[idx_s*3+i];
                ns[i] = nvec[idx_s*3+i];
                fs[i] = fvec[idx_s*3+i];
            }
            // Determine home cell
            for(i=0; i<3; i++)
                home_cell[i] = xs[i]/rn;
            // Iterate through near cells (including home cell)
            for(ip=0; ip<27; ip++)
            {
                // Get neigh cell
                icell[0] = home_cell[0] + px[ip];
                icell[1] = home_cell[1] + py[ip];
                icell[2] = home_cell[2] + pz[ip];
                // Periodic wrap
                for(j=0; j<3; j++)
                {
                    // (Could do this with mod)
                    pshift[j] = 0;
                    if(icell[j] >= ncell[j])
                    {
                        icell[j] = 0;
                        pshift[j] = box[j];
                    }
                    else if(icell[j]<0)
                    {
                        icell[j] = ncell[j]-1;
                        pshift[j] = -box[j];
                    }
                }
                icell_idx =
                    icell[0] +
                    icell[1]*ncell[0] +
                    icell[2]*ncell[1]*ncell[0];
                // Go through cell list (CSR range for this cell)
                int cell_a = cell_idx[icell_idx];
                int cell_b = cell_idx[icell_idx+1];
                for(int point_idx=cell_a; point_idx<cell_b; point_idx++)
                {
                    idx_t = cell_list[point_idx];
                    // Each unordered pair is handled once; the
                    // symmetric kernel updates both endpoints.
                    if(idx_t > idx_s)
                    {
                        // r points from s to t
                        for(j=0; j<3; j++)
                            xr[j] = x[idx_t*3+j] + pshift[j] - xs[j];
                        // Check if we are within truncation radius
                        rsq = xr[0]*xr[0] + xr[1]*xr[1] + xr[2]*xr[2];
                        if(rsq <= rcsq)
                        {
                            // Yes, so put interaction in buffer
                            buf_idx_t[buf_cnt] = idx_t;
                            buf_rsq[buf_cnt] = rsq;
                            for(i=0;i<3;i++)
                                buf_xr[3*buf_cnt+i] = xr[i];
                            buf_cnt++;
                        }
                    }
                    // Drain the buffer when full.
                    if(buf_cnt==buf_size)
                    {
                        // Do delayed calculations
                        op_A_CD(C,D,buf_rsq,buf_cnt,xi);
                        // Save interactions
                        for(idx_buf=0;idx_buf<buf_cnt;idx_buf++)
                        {
                            idx_t = buf_idx_t[idx_buf];
                            for(i=0;i<3;i++)
                            {
                                xr[i] = buf_xr[3*idx_buf+i];
                                // Target point normal vector
                                nt[i] = nvec[idx_t*3+i];
                                // Target point distribution density
                                ft[i] = fvec[idx_t*3+i];
                            }
                            // Calculate interactions t->s and s<-t
                            double phi_idx_t[3] = {0.0,0.0,0.0};
                            op_A_comp_symm_CD(xr,phi_idx_s,phi_idx_t,ns,nt,fs,ft,xi,C[idx_buf],D[idx_buf]);
                            for(i=0; i<3; i++)
                                phi[idx_t*3+i] += phi_idx_t[i];
                        } // endfor buffer
                        buf_cnt = 0;
                    } // endif buffer full
                } // End of neighbours in this cell
            } // End of cells
            // BUGFIX: drain whatever remains in the buffer before moving
            // to the next source point.  The old code only drained when
            // the LAST neighbour cell (ip==26) was non-empty; when that
            // cell was empty, buffered interactions leaked into the next
            // source point and were evaluated with the wrong
            // xs/ns/fs/phi_idx_s.
            if(buf_cnt > 0)
            {
                op_A_CD(C,D,buf_rsq,buf_cnt,xi);
                for(idx_buf=0;idx_buf<buf_cnt;idx_buf++)
                {
                    idx_t = buf_idx_t[idx_buf];
                    for(i=0;i<3;i++)
                    {
                        xr[i] = buf_xr[3*idx_buf+i];
                        nt[i] = nvec[idx_t*3+i];
                        ft[i] = fvec[idx_t*3+i];
                    }
                    double phi_idx_t[3] = {0.0,0.0,0.0};
                    op_A_comp_symm_CD(xr,phi_idx_s,phi_idx_t,ns,nt,fs,ft,xi,C[idx_buf],D[idx_buf]);
                    for(i=0; i<3; i++)
                        phi[idx_t*3+i] += phi_idx_t[i];
                }
                buf_cnt = 0;
            }
            // Save additions to point s
            for(int k=0; k<3; k++)
                phi[idx_s*3+k] += phi_idx_s[k];
        } // End of particles
#ifdef _OPENMP
        // Yes, this reduction is probably crap HPC-wise,
        // but it works well on my quad core right now.
        struct timeval tic_red, toc_red;
#pragma omp master
        gettimeofday(&tic_red, NULL);
        for(i=0; i<3*N; i++)
        {
#pragma omp atomic
            phi_out[i] += phi[i];
        }
#pragma omp master
        {
            gettimeofday(&toc_red, NULL);
            double time_spent = DELTA(tic_red,toc_red);
            if(VERBOSE)
                __PRINTF("[RSRC] Reduction took %.3f seconds.\n", time_spent);
        }
        // free/malloc not thread safe under MEX
#pragma omp critical
        __FREE(phi);
#else
        // Serial build: hand the private accumulator straight to output.
        __FREE(phi_out);
        phi_out = phi;
#endif
    } // End parallel section
    gettimeofday(&toc, NULL);
    time_spent = DELTA(tic,toc);
    gettimeofday(&tic, NULL);
    __FREE(x);
    __FREE(nvec);
    __FREE(fvec);
    // Transpose result back to the legacy SoA layout.
    double* restrict phi_tr = __MALLOC(3*N*sizeof(double));
    for(int i=0; i<N; i++)
    {
        for(int j=0; j<3; j++)
        {
            phi_tr[i+j*N] = phi_out[i*3+j];
        }
    }
    __FREE(phi_out);
    gettimeofday(&toc, NULL);
    time_tr += DELTA(tic,toc);
    if(VERBOSE)
    {
        __PRINTF("[RSRC] Transpose time: %.3f seconds.\n", time_tr);
        __PRINTF("[RSRC] phi computed in %.3f seconds.\n", time_spent);
    }
    *phi_p = phi_tr;
}
|
GB_unaryop__lnot_fp32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int8
// op(A') function: GB_tran__lnot_fp32_int8
// C type: float
// A type: int8_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Compute Cx [p] = !(((float) Ax [p]) != 0) for p = 0..anz-1, i.e. the
// logical-NOT operator applied to int8 input with float output, in
// parallel over nthreads threads (static schedule).  Returns
// GrB_NO_VALUE when this operator/type combination is compiled out via
// GB_DISABLE; GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_fp32_int8
(
float *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = !(((float) Ax [p]) != 0), via the GB_* macros above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Phase 2 of the transpose-and-apply kernel.  The loop itself lives in
// the shared template GB_unaryop_transpose.c, specialized through the
// GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined above.  Returns
// GrB_NO_VALUE when this operator/type pairing is compiled out.
GrB_Info GB_tran__lnot_fp32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
graph-gen.c | /*
* "Restless reachability in temporal graphs: algebraic methods and applications"
*
* This experimental source code is supplied to accompany the
* aforementioned paper.
*
* The source code is configured for a gcc build to a native
* microarchitecture that must support the AVX2 and PCLMULQDQ
* instruction set extensions. Other builds are possible but
* require manual configuration of 'Makefile' and 'builds.h'.
*
* The source code is subject to the following license.
*
* The MIT License (MIT)
*
* Copyright (c) 2020 Anonymous authors.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<time.h>
#include<sys/utsname.h>
#include<string.h>
#include<stdarg.h>
#include<assert.h>
#include<math.h>
#include<omp.h>
typedef long int index_t;
#include"ffprng.h"
#define UNDEFINED -1
#define BUILD_PARALLEL
/******************************************************* Common subroutines. */
#define ERROR(...) error(__FILE__,__LINE__,__func__,__VA_ARGS__);
// Print a formatted error message, prefixed with its source location,
// to stderr and abort.  Invoked through the ERROR(...) macro, which
// supplies file, line and function automatically.
static void error(const char *fn, int line, const char *func,
                  const char *format, ...)
{
    fprintf(stderr,
            "ERROR [file = %s, line = %d]\n"
            "%s: ",
            fn, line, func);
    va_list ap;
    va_start(ap, format);
    vfprintf(stderr, format, ap);
    va_end(ap);
    fprintf(stderr, "\n");
    abort();
}
#define MAX_HOSTNAME 256
// Return this host's node name in a static buffer (not thread-safe).
// BUGFIX: the copy is now bounded -- the old strcpy could overflow the
// 256-byte buffer on a long nodename -- and a uname() failure falls
// back to "unknown" instead of reading an undefined struct.
const char *sysdep_hostname(void)
{
    static char hn[MAX_HOSTNAME];
    struct utsname undata;
    if(uname(&undata) == 0)
        snprintf(hn, sizeof hn, "%s", undata.nodename);
    else
        snprintf(hn, sizeof hn, "unknown");
    return hn;
}
// Convert a byte count to gibibytes (GiB) as a double.
double inGiB(size_t s)
{
    const double one_gib = (double) (1 << 30);
    return ((double) s) / one_gib;
}
/******************************************************** Memory allocation. */
#define MALLOC(id, x) malloc_wrapper(id, x)
#define FREE(id, x) free_wrapper(id, x)
// Net number of outstanding allocations (allocs minus frees); useful
// as a leak check at program exit.
index_t malloc_balance = 0;
// malloc() that aborts (via ERROR) on failure and tracks the allocation
// balance.  `id` is a human-readable tag used only by the optional
// MEM_INVENTORY trace output.
void *malloc_wrapper(const char *id, size_t size)
{
void *p = malloc(size);
if(p == NULL)
ERROR("malloc fails");
malloc_balance++;
#ifdef MEM_INVENTORY
fprintf(stdout, "alloc: %10s %7.3lf GiB\n", id, inGiB(size));
fflush(stdout);
#endif
return p;
}
// Counterpart of malloc_wrapper(): releases p and decrements the
// allocation balance.  `id` is used only by the MEM_INVENTORY trace.
void free_wrapper(const char *id, void *p)
{
free(p);
malloc_balance--;
#ifdef MEM_INVENTORY
fprintf(stdout, "free: %10s\n", id);
fflush(stdout);
#endif
}
// Allocate an uninitialized array of n index_t entries (caller frees
// with FREE).
index_t *alloc_idxtab(index_t n)
{
index_t *t = (index_t *) MALLOC("t", sizeof(index_t)*n);
return t;
}
/****************************************************************** sorting. */
// In-place Shell sort of a[0..n-1] using Knuth's gap sequence
// (1, 4, 13, 40, ...); fine for the short arrays used here.
void shellsort(index_t n, index_t *a)
{
    // Largest Knuth gap below n/3.
    index_t gap = 1;
    while(gap < n/3)
        gap = 3*gap + 1;
    while(gap > 0) {
        // Gapped insertion sort.
        for(index_t k = gap; k < n; k++) {
            index_t key = a[k];
            index_t pos = k;
            while(pos >= gap && a[pos-gap] > key) {
                a[pos] = a[pos-gap];
                pos -= gap;
            }
            a[pos] = key;
        }
        gap /= 3;
    }
}
#ifdef DEBUG
// Debug dump: print the n entries of `a` to stdout, adding `offset` to
// every entry except the -1 sentinel (printed as-is).
void print_array(const char *name, index_t n, index_t *a, index_t offset)
{
fprintf(stdout, "%s (%ld):", name, n);
for(index_t i = 0; i < n; i++) {
fprintf(stdout, " %ld", a[i] == -1 ? -1 : a[i]+offset);
}
fprintf(stdout, "\n");
}
#endif
/************************************************* Random number generators. */
// Wide pseudo-random index built from two rand() draws: the first
// shifted left by 31 bits and XORed with the second.
index_t irand(void)
{
return (((index_t) rand())<<31)^((index_t) rand());
}
// Fill p[0..n-1] with pseudo-random indices from irand(), after seeding
// the C library generator with `seed`.
void rand_nums(index_t n, index_t *p, index_t seed)
{
    srand(seed);
    index_t k = 0;
    while(k < n) {
        p[k] = irand();
        k++;
    }
}
// Fill p[0..n-1] with pseudo-random values in [0, range), seeded with
// `seed`.  The work is split into nt blocks; each block fast-forwards
// the ffprng stream to its start offset, so the result is independent
// of the number of threads actually used.
// NOTE(review): values are reduced with %, so they carry the usual
// (tiny) modulo bias.
void randseq_range(index_t n, index_t range, index_t *p, index_t seed)
{
#ifdef BUILD_PARALLEL
index_t nt = 64;
#else
index_t nt = 1;
#endif
ffprng_t base;
FFPRNG_INIT(base, seed);
index_t block_size = n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t t = 0; t < nt; t++) {
ffprng_t gen;
index_t start = t*block_size;
// The last block absorbs the remainder of n/nt.
index_t stop = (t == nt-1) ? n-1 : (start+block_size-1);
FFPRNG_FWD(gen, start, base);
for(index_t i = start; i <= stop; i++) {
ffprng_scalar_t rnd;
FFPRNG_RAND(rnd, gen);
// Mask off the sign bit before the modulo reduction.
index_t rs = (index_t) (rnd&0X7FFFFFFFFFFFFFFF);
p[i] = rs%range;
}
}
}
// Fisher-Yates shuffle of p[0..n-1], driven by the supplied generator
// state (taken by value, so the caller's stream is not advanced).
// NOTE(review): the swap index uses %, so it has a slight modulo bias.
void randshuffle_seq(index_t n, index_t *p, ffprng_t gen)
{
for(index_t i = 0; i < n-1; i++) {
ffprng_scalar_t rnd;
FFPRNG_RAND(rnd, gen);
index_t x = i+(rnd%(n-i));
index_t t = p[x];
p[x] = p[i];
p[i] = t;
}
}
// Write a uniformly random permutation of 0..n-1 into p[0..n-1].
// Parallel three-phase bucketing scheme:
//   1) every block scans a common PRNG stream and counts how many of
//      the n draws fall into each of the nt buckets,
//   2) the counts are combined and prefix-summed into bucket offsets,
//   3) the stream is replayed and each thread collects the indices
//      belonging to its own bucket, then shuffles that bucket with an
//      independently seeded generator.
void randperm(index_t n, index_t *p, index_t seed)
{
#ifdef BUILD_PARALLEL
index_t nt = 64;
#else
index_t nt = 1;
#endif
index_t block_size = n/nt;
// f[t][b]: occupancy of bucket b as counted by block t; row 0 later
// holds the combined counts and then the bucket offsets.
index_t f[128][128];
assert(nt < 128);
ffprng_t base;
FFPRNG_INIT(base, seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
// Phase 1: per-block bucket occupancy counts.
for(index_t t = 0; t < nt; t++) {
for(index_t j = 0; j < nt; j++)
f[t][j] = 0;
index_t start = t*block_size;
index_t stop = (t == nt-1) ? n-1 : (start+block_size-1);
ffprng_t gen;
FFPRNG_FWD(gen, start, base);
for(index_t i = start; i <= stop; i++) {
ffprng_scalar_t rnd;
FFPRNG_RAND(rnd, gen);
index_t bin = (index_t) ((unsigned long) rnd)%((unsigned long)nt);
f[t][bin]++;
}
}
// Phase 2: combine counts into row 0 and prefix-sum to offsets.
for(index_t bin = 0; bin < nt; bin++) {
for(index_t t = 1; t < nt; t++) {
f[0][bin] += f[t][bin];
}
}
index_t run = 0;
for(index_t j = 1; j <= nt; j++) {
index_t fp = f[0][j-1];
f[0][j-1] = run;
run += fp;
}
f[0][nt] = run;
// Phase 3a: replay the same stream; thread t collects the indices
// that hash to its bucket, in stream order.
FFPRNG_INIT(base, seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t t = 0; t < nt; t++) {
ffprng_t gen;
index_t start = 0;
index_t stop = n-1;
index_t pos = f[0][t];
FFPRNG_FWD(gen, start, base);
for(index_t i = start; i <= stop; i++) {
ffprng_scalar_t rnd;
FFPRNG_RAND(rnd, gen);
index_t bin = (index_t) ((unsigned long) rnd)%((unsigned long)nt);
if(bin == t)
p[pos++] = i;
}
assert(pos == f[0][t+1]);
}
// Phase 3b: shuffle each bucket with an independently seeded stream.
FFPRNG_INIT(base, (seed^0x9078563412EFDCABL));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t t = 0; t < nt; t++) {
ffprng_t fwd, gen;
index_t start = f[0][t];
index_t stop = f[0][t+1]-1;
index_t u;
FFPRNG_FWD(fwd, (1234567890123456L*t), base);
FFPRNG_RAND(u, fwd);
FFPRNG_INIT(gen, u);
randshuffle_seq(stop-start+1, p + start, gen);
}
}
// Allocate and return a uniformly random permutation of 0..n-1
// (caller frees with FREE).
index_t *alloc_randperm(index_t n, index_t seed)
{
    index_t *perm = alloc_idxtab(n);
    randperm(n, perm, seed);
    return perm;
}
/************************************************ Rudimentary graph builder. */
// Kinds of graphs the generator can produce.
enum graph_type{SMALL, REGULAR, NOMOTIF, POWLAW, CLIQUE, DENDOGRAM};
// Temporal (multi)graph.  Edges are stored flat in `edges` as
// (u, v, t) triples -- three index_t entries per edge -- and
// `edge_capacity` is measured in edges, so the allocation holds
// 3*edge_capacity entries.  Time stamps are 0-based and must be
// strictly below time_limit.
typedef struct
{
enum graph_type gtype;
index_t is_directed;
index_t num_vertices;
index_t num_edges;
index_t time_limit;
index_t edge_capacity;
index_t *edges;
} graph_t;
// Reallocate an index array to m entries, copying the first m_was
// entries from `was` and freeing it.  Pass was == NULL for a fresh
// allocation (m_was is then ignored except for the assert).
// NOTE(review): m_was must not exceed the old allocation's length --
// enlarge() itself cannot check this.
static index_t *enlarge(index_t m, index_t m_was, index_t *was)
{
assert(m >= 0 && m_was >= 0);
index_t *a = (index_t *) MALLOC("a", sizeof(index_t)*m);
index_t i;
if(was != (void *) 0) {
for(i = 0; i < m_was; i++) {
a[i] = was[i];
}
FREE("was", was);
}
return a;
}
// Allocate an empty undirected graph with n vertices and initial room
// for 1000 edges (3 index entries each).  The caller sets time_limit
// before adding edges.  NOTE(review): g->gtype is left uninitialized.
graph_t *graph_alloc(index_t n)
{
assert(n >= 0);
graph_t *g = (graph_t *) MALLOC("g", sizeof(graph_t));
g->is_directed = 0; // default: undirected graph
g->num_vertices = n;
g->num_edges = 0;
g->time_limit = 0;
g->edge_capacity = 1000;
g->edges = enlarge(3*g->edge_capacity, 0, (void *) 0);
return g;
}
// Set the directedness flag (0 = undirected, 1 = directed).
void graph_set_is_directed(graph_t *g, index_t is_dir)
{
assert(is_dir == 0 || is_dir == 1);
g->is_directed = is_dir;
}
// Release a graph and its edge storage.
void graph_free(graph_t *g)
{
FREE("g->edges", g->edges);
FREE("g", g);
}
// Append temporal edge (u, v, t) to g, doubling the edge buffer when
// it is full.  Endpoints are checked against num_vertices and the time
// stamp against time_limit.
void graph_add_edge(graph_t *g, index_t u, index_t v, index_t t)
{
    assert(u >= 0 &&
           v >= 0 &&
           u < g->num_vertices &&
           v < g->num_vertices);
    assert(t >= 0 && t < g->time_limit);
    if(g->num_edges == g->edge_capacity) {
        // Grow to twice the capacity (6*cap index entries).
        // BUGFIX: copy only the 3*cap entries that exist in the old
        // buffer; the old call asked enlarge() to copy 6*cap entries,
        // reading 3*cap entries past the end of the old allocation.
        g->edges = enlarge(6*g->edge_capacity, 3*g->edge_capacity, g->edges);
        g->edge_capacity *= 2;
    }
    assert(g->num_edges < g->edge_capacity);
    index_t *e = g->edges + 3*g->num_edges;
    g->num_edges++;
    e[0] = u;
    e[1] = v;
    e[2] = t;
    //shellsort(2, e);
}
// Reserve room for `cap` additional edges and return a pointer to the
// first new (u, v, t) slot.  The new edges are counted into num_edges
// immediately, so the caller is obliged to fill all 3*cap entries.
index_t *graph_edgebuf(graph_t *g, index_t cap)
{
//assert(g->has_target == 0);
g->edges = enlarge(3*g->edge_capacity+3*cap, 3*g->edge_capacity, g->edges);
index_t *e = g->edges + 3*g->num_edges;
g->edge_capacity += cap;
g->num_edges += cap;
return e;
}
/************************************************************ Output graph. */
// Write the graph in the project's DIMACS-like format: one
// "p motif n m max_t max_rt is_directed" header line, then one
// "e u v t" line per edge.  Vertex ids and time stamps are converted
// to 1-based on output.
void graph_out_dimacs(FILE *out, graph_t *g, index_t max_rt)
{
index_t n = g->num_vertices;
index_t m = g->num_edges;
//index_t k = g->motif_size;
index_t max_t = g->time_limit;
//index_t max_rt = g->rest_limit;
index_t *e = g->edges;
fprintf(out, "p motif %ld %ld %ld %ld %ld\n",
(long) n, (long) m, (long) max_t, (long) max_rt,
(long) g->is_directed);
// output edge list
for(index_t i = 0; i < m; i++) {
index_t *e_i = e + 3*i;
index_t u = e_i[0]+1;
index_t v = e_i[1]+1;
index_t t = e_i[2]+1;
fprintf(out, "e %ld %ld %ld\n", u, v, t);
}
}
// Print one "r u rt" line per vertex (both 1-based) to the given
// stream.
void graph_out_rest_time(FILE *out, index_t n, index_t *rest_time)
{
    for(index_t u = 0; u < n; u++)
        // BUGFIX: honour the `out` parameter; this previously wrote to
        // stdout regardless of the stream passed in.
        fprintf(out, "r %ld %ld\n", u+1, rest_time[u]+1);
}
// Print the source vertices as one "s <count> v1 v2 ..." line with
// 1-based ids; UNDEFINED entries are emitted as-is.  Prints nothing
// when there are no sources.
void graph_out_sources(FILE *out, index_t num_sources, index_t *sources)
{
    if(num_sources == 0)
        return;
    fprintf(out, "s %ld", num_sources);
    for(index_t k = 0; k < num_sources; k++) {
        index_t s = sources[k];
        fprintf(out, " %ld", s == UNDEFINED ? s : s+1);
    }
    fprintf(out, "\n");
}
// Print the separator vertices as one "t <count> v1 v2 ..." line with
// 1-based ids; UNDEFINED entries are emitted as-is.  Prints nothing
// when there are no separators.
void graph_out_separators(FILE *out, index_t num_separators, index_t *separators)
{
    if(num_separators == 0)
        return;
    fprintf(out, "t %ld", num_separators);
    for(index_t k = 0; k < num_separators; k++) {
        index_t s = separators[k];
        fprintf(out, " %ld", s == UNDEFINED ? s : s+1);
    }
    fprintf(out, "\n");
}
// Return a freshly allocated array (caller frees) with the degree of
// every vertex; each edge contributes one to the degree of both of its
// endpoints.
index_t *graph_degree_dist(graph_t *g)
{
    index_t n = g->num_vertices;
    index_t m = g->num_edges;
    index_t *deg = alloc_idxtab(n);
    for(index_t i = 0; i < n; i++)
        deg[i] = 0;
    for(index_t j = 0; j < m; j++) {
        // BUGFIX: edges are stored as (u, v, t) triples, i.e. with
        // stride 3 (see graph_add_edge / graph_out_dimacs).  The old
        // stride-2 indexing mixed time stamps into vertex ids and
        // could index deg[] out of bounds.
        deg[g->edges[3*j]]++;
        deg[g->edges[3*j+1]]++;
    }
    return deg;
}
// Debug helper: print the degree of every vertex to `out`.
void print_stat(FILE *out, graph_t *g)
{
index_t n = g->num_vertices;
index_t *deg = graph_degree_dist(g);
for(index_t i = 0; i < n; i++) {
fprintf(out, "deg[%ld] = %ld\n", i, deg[i]);
}
FREE("deg", deg);
}
/***************** A quick-and-dirty generator for a power law distribution. */
// Solve (1 - b^k) / (1 - b) = n for b by bisection on the interval
// [1, exp(log(n)/(k-1))]; 200 iterations exhaust double precision.
double beta(index_t n, index_t k)
{
    double lo = 1.0;
    double hi = exp((log(n)/(double) (k-1)));
    double mid = (lo+hi)/2.0;
    for(index_t iter = 0; iter < 200; iter++) {
        mid = (lo+hi)/2.0;
        double nn = (1-pow(mid,k))/(1-mid);
        if(nn < n)
            lo = mid;
        if(nn > n)
            hi = mid;
    }
    return mid;
}
// Log-domain normalization constant for the degree distribution:
// returns dn such that exp(dn) * sum_i freq[i]^(alpha+1) = d*n.
double degnormalizer(index_t n, index_t d, index_t *freq, index_t k, double alpha)
{
    double powsum = 0.0;
    for(index_t i = 0; i < k; i++)
        powsum += pow(freq[i], alpha+1);
    return log(d*n) - log(powsum);
}
// Build a discretized power-law degree distribution with k degree
// classes: on return, freq[j] vertices have degree deg[j], targeting n
// vertices total with average degree d and exponent alpha.  The total
// degree sum is patched to be even, as the configuration model pairs
// half-edges.
void mkpowlawdist(index_t *deg, index_t *freq,
index_t n, index_t d, double alpha, index_t k)
{
double b = beta(n, k);
// Class sizes from the geometric partition induced by b.
index_t fsum = 0;
for(index_t j = 0; j < k; j++) {
freq[j] = round((1-pow(b,j+1))/(1-b)-fsum);
fsum += freq[j];
}
// Per-class degrees from the normalized power law.
double dn = degnormalizer(n, d, freq, k, alpha);
double t = 0.0;
index_t dfsum = 0;
for(index_t j = 0; j < k; j++) {
t += exp(dn)*pow(freq[j],alpha+1);
double tt = t-dfsum;
deg[j] = round(tt/freq[j]);
dfsum += deg[j]*freq[j];
}
// If the degree sum is odd, add one vertex to a class with odd degree
// to make it even; the assert fires if no such class exists.
if(dfsum % 2 == 1) {
index_t i = k-1;
for(; i >= 0; i--) {
if(deg[i] % 2 == 1 &&
freq[i] % 2 == 1) {
freq[i]++;
dfsum += deg[i];
break;
}
}
assert(i >= 0);
}
fprintf(stderr,
"powlaw: n = %ld, d = %ld, alpha = %lf, w = %ld, beta = %lf, norm = %lf\n",
n, d, alpha, k, b, dn);
}
/************************************************** Test graph generator(s). */
/* Generators for instances with a unique match. */
/* Use the bits of idx to plant a path on the corresponding
* vertices. Uses the n least significant bits of idx. */
/*
void graph_set_rand_target(graph_t *g, index_t k, index_t max_color,
index_t num_targets, index_t seed)
{
g->has_target = 1;
g->motif_size = k;
g->num_targets = num_targets;
g->target = (index_t *) MALLOC("g->target", sizeof(index_t)*k);
randseq_range(k, max_color, g->target, seed);
g->num_colors = max_color;
randseq_range(g->num_vertices, max_color, g->colors, seed);
index_t *vertex_shuffle = alloc_randperm(g->num_vertices, seed);
g->motif_counts = (index_t *) MALLOC("g->motif_counts", sizeof(index_t)*num_targets*k);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t i = 0; i < num_targets; i++) {
for(index_t j = 0; j < k; j++) {
index_t u = vertex_shuffle[i*k+j];
g->motif_counts[i*k+j] = u;
g->colors[u] = g->target[j];
}
}
FREE("vertex_shuffle", vertex_shuffle);
}
*/
// Assign every one of the n vertices the maximal resting time
// max_rt-1.  *rest_time_out receives a freshly MALLOCed array of
// length n (caller frees).
void get_rest_time_const(index_t n, index_t max_rt, index_t **rest_time_out)
{
    index_t *rt = (index_t *) MALLOC("rest time", n*sizeof(index_t));
    const index_t value = max_rt-1;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t v = 0; v < n; v++)
        rt[v] = value;
    *rest_time_out = rt;
}
// Assign each of the n vertices an independent random resting time in
// [0, max_rt).  *rest_time_out receives a freshly MALLOCed array of
// length n (caller frees).
void get_rest_time_random(index_t n, index_t max_rt, index_t seed, index_t **rest_time_out)
{
index_t *rest_time = (index_t *) MALLOC("rest time", n*sizeof(index_t));
randseq_range(n, max_rt, rest_time, seed);
*rest_time_out = rest_time;
}
// Pick k distinct vertices uniformly at random from 0..n-1 by taking
// the first k entries of a random permutation.  *vertices_out receives
// a freshly MALLOCed array of length k (caller frees).
void get_random_vertices(index_t k, index_t n, index_t seed, index_t **vertices_out)
{
    index_t *shuffle = alloc_randperm(n, seed);
    index_t *picked = (index_t *) MALLOC("vertices", k*sizeof(index_t));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t j = 0; j < k; j++) {
        picked[j] = shuffle[j];
    }
    FREE("vertex_shuffle", shuffle);
    *vertices_out = picked;
}
/* The configuration model.
 * Warning: no check for repeated edges and loops. */
// Generate a random temporal multigraph with the degree distribution
// given by (degree[j], freq[j]) pairs, j = 0..ndd-1: lay out one
// "incidence" per half-edge, shuffle them, and pair consecutive
// incidences into edges carrying random time stamps in [0, max_time).
// Vertex labels are themselves randomly permuted.  Returns a freshly
// allocated graph (caller frees with graph_free).
// NOTE(review): max_resttime is accepted but unused here; resting
// times are generated separately by the get_rest_time_* helpers.
graph_t *graph_config_rand(index_t n, index_t ndd,
index_t max_time, index_t max_resttime,
index_t *degree, index_t *freq, index_t seed)
{
index_t num_incidences = 0;
for(index_t j = 0; j < ndd; j++)
num_incidences += degree[j]*freq[j];
assert(num_incidences % 2 == 0);
// One entry per half-edge, recording which vertex it belongs to.
index_t *vertex_id = alloc_idxtab(num_incidences);
index_t pos = 0;
index_t vno = 0;
for(index_t j = 0; j < ndd; j++) {
for(index_t k = 0; k < freq[j]; k++) {
for(index_t l = 0; l < degree[j]; l++)
vertex_id[pos++] = vno;
vno++;
}
}
index_t *vertex_shuffle = alloc_randperm(n, seed);
index_t *incidence_shuffle = alloc_randperm(num_incidences, seed);
index_t *time_shuffle = (index_t *) MALLOC("time_shuffle", sizeof(index_t)*num_incidences/2);
randseq_range(num_incidences/2, max_time, time_shuffle, seed);
graph_t *g = graph_alloc(n);
index_t *e = graph_edgebuf(g, num_incidences/2);
g->time_limit = max_time;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
// Pair consecutive shuffled incidences into edges.
for(index_t i = 0; i < num_incidences/2; i++) {
index_t u = vertex_shuffle[vertex_id[incidence_shuffle[2*i]]];
index_t v = vertex_shuffle[vertex_id[incidence_shuffle[2*i+1]]];
index_t t = time_shuffle[i];
//if(u > v) {
// index_t temp = u;
// u = v;
// v = temp;
//}
e[3*i] = u;
e[3*i+1] = v;
e[3*i+2] = t;
}
FREE("vertex_id", vertex_id);
FREE("vertex_shuffle", vertex_shuffle);
FREE("incidence_shuffle", incidence_shuffle);
FREE("time_shuffle", time_shuffle);
return g;
}
/*
graph_t *graph_config_rand_test(index_t n, index_t ndd, index_t max_time,
index_t max_resttime, index_t num_targets,
index_t *degree, index_t *freq, index_t seed)
{
index_t num_incidences = 0;
for(index_t j = 0; j < ndd; j++)
num_incidences += degree[j]*freq[j];
assert(num_incidences % 2 == 0);
index_t *vertex_id = alloc_idxtab(num_incidences);
index_t pos = 0;
index_t vno = 0;
for(index_t j = 0; j < ndd; j++) {
for(index_t k = 0; k < freq[j]; k++) {
for(index_t l = 0; l < degree[j]; l++)
vertex_id[pos++] = vno;
vno++;
}
}
index_t *vertex_shuffle = alloc_randperm(n, seed);
index_t *incidence_shuffle = alloc_randperm(num_incidences, seed);
index_t *time_shuffle = (index_t *) MALLOC("time_shuffle", sizeof(index_t)*num_incidences/2);
randseq_range(num_incidences/2, max_time, time_shuffle, seed);
graph_t *g = graph_alloc(n + num_targets);
index_t *e = graph_edgebuf(g, num_incidences/2);
g->time_limit = max_time;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t i = 0; i < num_incidences/2; i++) {
index_t u = vertex_shuffle[vertex_id[incidence_shuffle[2*i]]];
index_t v = vertex_shuffle[vertex_id[incidence_shuffle[2*i+1]]];
index_t t = time_shuffle[i];
if(u > v) {
index_t temp = u;
u = v;
v = temp;
}
e[3*i] = u;
e[3*i+1] = v;
e[3*i+2] = t;
}
// initialise resting times at random
//g->rest_limit = max_resttime;
randseq_range(g->num_vertices, max_resttime, g->rest_time, seed);
FREE("vertex_id", vertex_id);
FREE("vertex_shuffle", vertex_shuffle);
FREE("incidence_shuffle", incidence_shuffle);
FREE("time_shuffle", time_shuffle);
return g;
}
*/
/* Build a random d-regular temporal graph on n vertices: a configuration-
 * model graph with a single degree class (all n vertices have degree d).
 * Timestamps are bounded by max_t and resting times by max_rt; seed drives
 * the pseudo-random generator.  Ownership of the returned graph passes to
 * the caller. */
graph_t *graph_reg_rand(index_t n, index_t d, index_t max_t, index_t max_rt, index_t seed)
{
    index_t degree_class[128] = { 0 };
    index_t class_freq[128] = { 0 };
    degree_class[0] = d;   /* one class: degree d ... */
    class_freq[0] = n;     /* ... shared by all n vertices */
    return graph_config_rand(n, 1, max_t, max_rt, degree_class, class_freq, seed);
}
/* Build a random temporal graph whose degree distribution follows a power
 * law with exponent alpha over w degree classes (mkpowlawdist fills the
 * class degrees and frequencies).  The caller owns the returned graph. */
graph_t *graph_powlaw_rand(index_t n, index_t d, index_t max_t, index_t max_rt,
                           double alpha, index_t w, index_t seed)
{
    index_t *class_deg = (index_t *) MALLOC("deg", w*sizeof(index_t));
    index_t *class_freq = (index_t *) MALLOC("freq", w*sizeof(index_t));
    /* Derive the w (degree, frequency) pairs for the target distribution. */
    mkpowlawdist(class_deg, class_freq, n, d, alpha, w);
    graph_t *g = graph_config_rand(n, w, max_t, max_rt, class_deg, class_freq, seed);
    FREE("deg", class_deg);
    FREE("freq", class_freq);
    return g;
}
/*
graph_t *graph_reg_rand_test(index_t n, index_t d, index_t max_t,
index_t max_rt, index_t num_targets, index_t seed)
{
index_t deg[128];
index_t freq[128];
deg[0] = d;
freq[0] = n;
return graph_config_rand_test(n, 1, max_t, max_rt, num_targets, deg, freq, seed);
}
*/
/****************************************************** Program entry point. */
/*
 * Parses the command line, builds a random temporal graph of the requested
 * type ("regular", "regular-const", "powlaw", "powlaw-const"), attaches
 * random sources, separators and per-vertex resting times, and writes the
 * result to stdout in DIMACS-style format.
 *
 * Fixes relative to the previous revision:
 *  - an unrecognized <type> used to leave g == NULL and crash on the first
 *    dereference; it is now reported and exits with status 1;
 *  - the summary fprintf used to read g->is_directed etc. AFTER
 *    graph_free(g) (use-after-free); the free now happens after the print.
 */
int main(int argc, char **argv)
{
    /* Usage text when invoked without arguments or with trailing "-h". */
    if(argc < 2 || !strcmp(argv[argc-1], "-h")) {
        fprintf(stdout,
                "usage: %s <type> <arguments>\n"
                "available types (all parameters positive integers unless indicated otherwise):\n"
                "\n"
                " regular <n> <d> <t> <rt> <ns> <nt> <seed> (with 1 <= k <= n and n*d even)\n"
                " regular-const <n> <d> <t> <rt> <ns> <nt> <seed> (with 1 <= k <= n and n*d even)\n"
                " powlaw <n> <d> <al> <w> <t> <rt> <ns> <nt> <seed> (with al < 0.0, 2 <= w <= n, 1 <= k <= n)\n"
                " powlaw-const <n> <d> <al> <w> <t> <rt> <ns> <nt> <seed> (with al < 0.0, 2 <= w <= n, 1 <= k <= n)\n"
                "\n"
                "-----------------------------------------------------------------------------\n"
                "\tArguments\n"
                "\t<n> : number of vertices, integer value 1 <= n <= 2^63\n"
                "\t<d> : degree, n*d even\n"
                "\t<t> : maximum timestamp, integer value 1 <= n <= 2^63\n"
                "\t<rt> : maximum resting time, integer value 1 <= rt <= t\n"
                "\t<ns> : number of sources, integer value in range 1 to <n>\n"
                "\t<nt> : number of separators, integer value in range 1 to <n>\n"
                "\t<al> : alpha, float value < 0.0\n"
                "\t<w> : weight, integer value 2 <= w <= n\n"
                "\t<seed> : seed for random number generator, integer value 1 <= seed <= 2^63\n"
                ,
                argv[0]);
        return 0;
    }
    /* Echo the invocation so experiment logs are reproducible. */
    fprintf(stderr, "invoked as:");
    for(index_t f = 0; f < argc; f++)
        fprintf(stderr, " %s", argv[f]);
    fprintf(stderr, "\n");
    graph_t *g = (graph_t *) 0;
    char *type = argv[1];
    index_t n = 0;
    index_t d = 0;
    index_t max_t = 0;
    index_t max_rt = 0;
    index_t num_sources = 0;
    index_t num_separators = 0;
    index_t seed = 0;
    double al = 0.0;
    index_t w = 0;
    // d-regular graph
    if(!strcmp("regular", type) || !strcmp("regular-const", type)) {
        assert(argc-2 >= 7);
        n = atol(argv[2+0]);
        d = atol(argv[2+1]);
        max_t = atol(argv[2+2]);
        max_rt = atol(argv[2+3]);
        num_sources = atol(argv[2+4]);
        num_separators = atol(argv[2+5]);
        seed = atol(argv[2+6]);
        // error check
        assert(n >= 1 && d >= 0 && n*d % 2 == 0 && seed >= 1);
        assert(max_t >= 1);
        assert(max_rt >= 1);
        assert(num_sources >= 1 && num_sources <= n);
        assert(num_separators >=0 && num_separators <= n);
        // generate graph
        g = graph_reg_rand(n, d, max_t, max_rt, seed);
        g->gtype = REGULAR;
    }
    // powerlaw graph
    if(!strcmp("powlaw", type) || !strcmp("powlaw-const", type)) {
        assert(argc-2 >= 9);
        n = atol(argv[2+0]);
        d = atol(argv[2+1]);
        al = atof(argv[2+2]);
        w = atol(argv[2+3]);
        max_t = atol(argv[2+4]);
        max_rt = atol(argv[2+5]);
        num_sources = atol(argv[2+6]);
        num_separators = atol(argv[2+7]);
        seed = atol(argv[2+8]);
        // error check
        assert(n >= 1 && d >= 0 && al < 0.0 && w >= 2 && w <= n && seed >= 1);
        assert(max_t >= 1);
        assert(max_rt >= 1);
        assert(num_sources >= 1 && num_sources <= n);
        assert(num_separators >=0 && num_separators <= n);
        // generate graph
        g = graph_powlaw_rand(n, d, max_t, max_rt, al, w, seed);
        g->gtype = POWLAW;
    }
    /* Fail loudly on an unknown type instead of dereferencing a null graph
       (and calling srand(0) with the never-assigned seed). */
    if(g == (graph_t *) 0) {
        fprintf(stderr, "error: unknown graph type '%s' (run with -h for usage)\n", type);
        return 1;
    }
    srand(seed);
    // set sources
    index_t *sources = (index_t *) 0;
    get_random_vertices(num_sources, n, irand(), &sources);
    // set separators
    index_t *separators = (index_t *) 0;
    get_random_vertices(num_separators, n, irand(), &separators);
    // set resting time: constant for "-const" variants, random otherwise
    index_t *rest_time = (index_t *) 0;
    if(!strcmp("regular-const", type) || !strcmp("powlaw-const", type)) {
        get_rest_time_const(n, max_rt, &rest_time);
    } else {
        get_rest_time_random(n, max_rt, irand(), &rest_time);
    }
    graph_out_dimacs(stdout, g, max_rt);
    graph_out_rest_time(stdout, n, rest_time);
    graph_out_sources(stdout, num_sources, sources);
    graph_out_separators(stdout, num_separators, separators);
    FREE("vertices", sources);
    FREE("vertices", separators);
    FREE("rest_time", rest_time);
    /* Summary to stderr; must run BEFORE graph_free(g). */
    fprintf(stderr, "gen-count [%s, %s]: n = %ld, m = %ld, t = %ld, rt = %ld\n",
            type,
            g->is_directed ? "directed" : "undirected",
            g->num_vertices,
            g->num_edges,
            g->time_limit,
            max_rt
            );
    graph_free(g);
    assert(malloc_balance == 0);
    return 0;
}
|
simd8.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -O3 -fdump-tree-vect-details" } */
/* { dg-final { scan-tree-dump-times "vectorized 0 loops in function" 4 "vect" } } */
/* Shared array incremented by each of the test loops below. */
int a[1024];

/* The constant-false "if (0)" clause requests non-SIMD execution of the
   loop; the dg-final scan above expects 0 vectorized loops per function. */
void
foo (void)
{
#pragma omp simd if (0)
  for (int i = 0; i < 1024; ++i)
    a[i] = a[i] + 1;
}
/* As foo, but with explicit safelen/simdlen clauses; "if (0)" still
   disables SIMD execution, so no vectorization is expected. */
void
bar (void)
{
#pragma omp simd if (0) safelen (256) simdlen (8)
  for (int i = 0; i < 512; ++i)
    a[i] = a[i] + 1;
}
/* simdlen(1) allows only one concurrent lane, so the loop is effectively
   scalar; again the test expects 0 vectorized loops. */
void
baz (void)
{
#pragma omp simd safelen (256) simdlen (1)
  for (int i = 0; i < 512; ++i)
    a[i] = a[i] + 1;
}
/* Even with a true "if (1)" clause, simdlen(1) keeps the loop scalar. */
void
qux (void)
{
#pragma omp simd simdlen (1) if (1)
  for (int i = 0; i < 512; ++i)
    a[i] = a[i] + 1;
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * Returns 1 when the difference is negative, 0 otherwise.  NOTE: *y is
 * normalized in place (its fields are adjusted), exactly as in the classic
 * glibc-manual version of this routine; callers must not rely on *y
 * afterwards.  On success result->tv_usec is in [0, 1000000). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Conversely, fold any excess microseconds in the gap back into y. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 24;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
pr60823-2.c | /* PR tree-optimization/60823 */
/* { dg-do run } */
/* { dg-require-effective-target vect_simd_clones } */
/* { dg-options "-O2 -fopenmp-simd" } */
#pragma omp declare simd simdlen(4) notinbranch
__attribute__((noinline)) int
foo (double c1, double c2)
{
  /* Escape-time style iteration on (z1, z2) seeded from (c1, c2); records
     the first iteration index at which |z|^2 exceeds 4.0, else 100.
     Note z2's update deliberately uses the already-updated z1, and the
     constants drift by 0.5 each step, matching the original exactly. */
  double z1 = c1;
  double z2 = c2;
  int res = 100;
  for (int i = 0; i < 5; i++)
    {
      if (z1 * z1 + z2 * z2 > 4.0 && i < res)
        res = i;
      z1 = c1 + z1 * z1 - z2 * z2;
      z2 = c2 + 2.0 * z1 * z2;
      c1 += 0.5;
      c2 += 0.5;
    }
  return res;
}
/* Optimization barrier: the empty volatile asm with a "memory" clobber
   forces the compiler to treat the arrays behind x and y as live, so the
   initializers in main are not constant-folded away. */
__attribute__((noinline, noclone)) void
bar (double *x, double *y)
{
  asm volatile ("" : : "rm" (x), "rm" (y) : "memory");
}
/* Driver: feeds four (c, d) pairs through foo inside an OpenMP simd loop
   (exercising the simd clone of foo) and aborts unless the escape
   iterations match the known-good scalar results {3, 1, 1, 2}. */
int
main ()
{
  int i;
  double c[4] = { 0.0, 1.0, 0.0, 1.0 };
  double d[4] = { 0.0, 1.0, 2.0, 0.0 };
  int e[4];
  bar (c, d); /* hide the array contents from the optimizer */
#pragma omp simd safelen(4)
  for (i = 0; i < 4; i++)
    e[i] = foo (c[i], d[i]);
  if (e[0] != 3 || e[1] != 1 || e[2] != 1 || e[3] != 2)
    __builtin_abort ();
  return 0;
}
|
doacross-3.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
void
foo (void)
{
int i, j;
#pragma omp for ordered (1)
for (i = 0; i < 64; i++)
{
#pragma omp ordered depend (sink: i + 1) /* { dg-warning "'depend' clause with 'sink' modifier waiting for lexically later iteration" } */
#pragma omp ordered depend (source)
}
#pragma omp for ordered (1)
for (i = 63; i >= 0; i--)
{
#pragma omp ordered depend (sink: i - 1) /* { dg-warning "'depend' clause with 'sink' modifier waiting for lexically later iteration" } */
#pragma omp ordered depend (source)
}
#pragma omp for ordered (2) collapse (2)
for (i = 0; i < 64; i++)
for (j = 0; j < 64; j++)
{
#pragma omp ordered depend (sink: i + 1, j - 2) /* { dg-warning "'depend' clause with 'sink' modifier waiting for lexically later iteration" } */
#pragma omp ordered depend (source)
}
#pragma omp for ordered (2) collapse (2)
for (i = 63; i >= 0; --i)
for (j = 0; j < 64; j++)
{
#pragma omp ordered depend (sink: i - 2, j - 2) /* { dg-warning "'depend' clause with 'sink' modifier waiting for lexically later iteration" } */
#pragma omp ordered depend (source)
}
#pragma omp for ordered (2) collapse (2)
for (i = 0; i < 64; i++)
for (j = 0; j < 64; j++)
{
#pragma omp ordered depend (sink: i - 1, j + 2)
#pragma omp ordered depend (source)
}
#pragma omp for ordered (2) collapse (2)
for (i = 63; i >= 0; --i)
for (j = 0; j < 64; j++)
{
#pragma omp ordered depend (sink: i + 2, j + 2)
#pragma omp ordered depend (source)
}
#pragma omp for ordered (1)
for (i = 0; i < 64; i += 2)
{
#pragma omp ordered depend (sink: i - 1) /* { dg-warning "'depend' clause with 'sink' modifier refers to iteration never in the iteration space" } */
#pragma omp ordered depend (source)
}
}
|
test_example.valid_rose_output.c | #pragma omp greg
extern int omp_get_num_threads();
extern int omp_get_thread_num();
extern void genericForHeader();
extern void foo();
extern void bar();
int a;
int b;
int c;
/* Auto-generated ROSE translator output (reference file for an unparser
   test).  Several worksharing pragmas below appear outside any parallel
   region ("orphaned"), which is legal OpenMP; they then execute with the
   encountering thread only.  Code left byte-identical on purpose. */
int main(int argc,char **argv)
{
  int d[10];
  int e;
  int f;
  /* First parallel region: privatized globals and an ordered loop. */
#pragma omp parallel private(a, d) firstprivate(b, c) default(shared)
  {
    foo();
    foo();
#pragma omp for ordered
    for (int iterator = 0; iterator < 10; iterator++) {
      a = (b = (c = 0));
    }
  }
  bar();
  /* Same clauses again, spacing differences preserved from the unparser. */
#pragma omp parallel private ( a, d ) firstprivate ( b, c ) default ( shared )
  {
    {
      d[0] = (e = (f = 1));
    }
  }
#pragma omp critical
  {
    foo();
    bar();
  }
  /* Orphaned worksharing construct. */
#pragma omp for nowait
  for (int i = 0; i < 1; ++i) {
    bar();
  }
  if (omp_get_thread_num() == 0) {
    bar();
  }
  else {
    omp_get_thread_num();
  }
#pragma omp critical
  {
    foo();
    {
    }
  }
#pragma omp for
  for (int i = 0; i < 1; ++i) {{
      d[0] = (e = (f = 1));
    }
  }
  if (omp_get_thread_num() == 0) {{
      d[0] = (e = (f = 1));
    }
  }
  else {
    omp_get_thread_num();
  }
  return 0;
}
|
GB_unaryop__ainv_uint32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_int32
// op(A') function: GB_tran__ainv_uint32_int32
// C type: uint32_t
// A type: int32_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = (uint32_t) (-Ax [p]) for all anz entries, statically scheduled
   across nthreads OpenMP threads.  Returns GrB_NO_VALUE when this kernel
   is compiled out via GB_DISABLE (see macro above), GrB_SUCCESS otherwise.
   NOTE: generated file -- edits here are normally overwritten. */
GrB_Info GB_unop__ainv_uint32_int32
(
    uint32_t *restrict Cx,      /* output array */
    const int32_t *restrict Ax, /* input array */
    int64_t anz,                /* number of entries to process */
    int nthreads                /* OpenMP thread count */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   /* Cx [p] = op (cast (Ax [p])) */
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = (uint32_t) (-(A')): transpose with typecast and additive inverse,
   delegated to the shared template GB_unaryop_transpose.c (phase 2 of 2),
   sliced over naslice tasks.  Returns GrB_NO_VALUE when compiled out.
   NOTE: generated file -- edits here are normally overwritten. */
GrB_Info GB_tran__ainv_uint32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            /* per-slice row counts from phase 1 */
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     /* number of slices of A */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
FiniteDifferenceLibrary.c | #define dll_EXPORTS = 1
#include "FiniteDifferenceLibrary.h"
/* Sanity check for the OpenMP runtime: request nThreads and return the
 * number of threads actually launched inside a parallel region (thread 0
 * reports; the region's implicit barrier orders the write before the
 * read).  Initialized to 1 so the return value is defined even if the
 * region executes serially. */
DLL_EXPORT int openMPtest(int nThreads)
{
	omp_set_num_threads(nThreads);
	int nThreads_running = 1;  /* was uninitialized: UB if never assigned */
#pragma omp parallel
	{
		if (omp_get_thread_num() == 0)
		{
			nThreads_running = omp_get_num_threads();
		}
	}
	return nThreads_running;
}
/*
 * Forward finite differences with Neumann (zero-gradient) far boundaries.
 *
 * inimagefull        : input, nc channels of nx*ny*nz voxels each
 * outimage{X,Y,Z}full: per-channel forward differences along x, y, z
 * outimageCfull      : forward differences across channels (used if nc > 1)
 * Returns 0.  Assumes nx, ny > 1 (see adjoint below for the same note).
 *
 * Strategy: sweep the bulk with unguarded neighbor reads -- at row/slice
 * ends these read past the logical edge but stay inside the channel's
 * allocation -- then overwrite the boundary entries with 0.
 */
int fdiff_direct_neumann(const float *inimagefull, float *outimageXfull, float *outimageYfull, float *outimageZfull, float *outimageCfull, long nx, long ny, long nz, long nc)
{
	size_t volume = nx * ny * nz;
	/* Per-channel cursors, advanced by one volume per iteration of c. */
	const float *inimage = inimagefull;
	float *outimageX = outimageXfull;
	float *outimageY = outimageYfull;
	float *outimageZ = outimageZfull;
	int offset1 = (nz - 1) * nx * ny; //ind to beginning of last slice
	int offset2 = offset1 + (ny - 1) * nx; //ind to beginning of last row
	long c;
	int z_dim = nz > 1 ? 1: 0; /* 1 when the volume is truly 3D */
	for (c = 0; c < nc; c++)
	{
#pragma omp parallel
		{
			long ind, k, j, i;
			float pix0;
			//run over all and then fix boundaries
			/* Bulk: every slice except the last. */
#pragma omp for nowait
			for (ind = 0; ind < nx * ny * (nz - 1); ind++)
			{
				pix0 = -inimage[ind];
				outimageX[ind] = pix0 + inimage[ind + 1];
				outimageY[ind] = pix0 + inimage[ind + nx];
				outimageZ[ind] = pix0 + inimage[ind + nx * ny];
			}
			/* Last slice (no Z difference): every row except the last. */
#pragma omp for nowait
			for (ind = 0; ind < nx * (ny - 1); ind++)
			{
				pix0 = -inimage[ind + offset1];
				outimageX[ind + offset1] = pix0 + inimage[ind + offset1 + 1];
				outimageY[ind + offset1] = pix0 + inimage[ind + offset1 + nx];
			}
			/* Last row of the last slice: X difference only. */
#pragma omp for
			for (ind = 0; ind < nx - 1; ind++)
			{
				pix0 = -inimage[ind + offset2];
				outimageX[ind + offset2] = pix0 + inimage[ind + offset2 + 1];
			}
			//boundaries
			/* Neumann: zero Y gradient on the last row of every slice. */
#pragma omp for nowait
			for (k = 0; k < nz; k++)
			{
				for (i = 0; i < nx; i++)
				{
					outimageY[(k * ny * nx) + (ny - 1) * nx + i] = 0;
				}
			}
			/* Neumann: zero X gradient in the last column of every row. */
#pragma omp for nowait
			for (k = 0; k < nz; k++)
			{
				for (j = 0; j < ny; j++)
				{
					outimageX[k * ny * nx + j * nx + nx - 1] = 0;
				}
			}
			if (z_dim)
			{
				/* Neumann: zero Z gradient on the last slice. */
#pragma omp for
				for (ind = 0; ind < ny * nx; ind++)
				{
					outimageZ[nx * ny * (nz - 1) + ind] = 0;
				}
			}
		}
		inimage += volume;
		outimageX += volume;
		outimageY += volume;
		outimageZ += volume;
	}
	//now the rest of the channels
	/* Channel-direction differences; the last channel's gradient is 0. */
	if (nc > 1)
	{
		long ind;
		for (c = 0; c < nc - 1; c++)
		{
			float *outimageC = outimageCfull + c * volume;
			const float *inimage = inimagefull + c * volume;
#pragma omp parallel for
			for (ind = 0; ind < volume; ind++)
			{
				outimageC[ind] = -inimage[ind] + inimage[ind + volume];
			}
		}
#pragma omp parallel for
		for (ind = 0; ind < volume; ind++)
		{
			outimageCfull[(nc - 1) * volume + ind] = 0;
		}
	}
	return 0;
}
/*
 * Forward finite differences with periodic boundaries: at each far edge
 * the difference wraps around to the opposite edge of the same axis.
 * Arguments and bulk/fix-up strategy are identical to
 * fdiff_direct_neumann above; only the boundary writes differ.
 * Returns 0.
 */
int fdiff_direct_periodic(const float *inimagefull, float *outimageXfull, float *outimageYfull, float *outimageZfull, float *outimageCfull, long nx, long ny, long nz, long nc)
{
	size_t volume = nx * ny * nz;
	/* Per-channel cursors, advanced by one volume per iteration of c. */
	const float *inimage = inimagefull;
	float *outimageX = outimageXfull;
	float *outimageY = outimageYfull;
	float *outimageZ = outimageZfull;
	int offset1 = (nz - 1) * nx * ny; //ind to beginning of last slice
	int offset2 = offset1 + (ny - 1) * nx; //ind to beginning of last row
	long c;
	for (c = 0; c < nc; c++)
	{
#pragma omp parallel
		{
			long ind, k;
			float pix0;
			//run over all and then fix boundaries
			/* Bulk: every slice except the last. */
#pragma omp for nowait
			for (ind = 0; ind < nx * ny * (nz - 1); ind++)
			{
				pix0 = -inimage[ind];
				outimageX[ind] = pix0 + inimage[ind + 1];
				outimageY[ind] = pix0 + inimage[ind + nx];
				outimageZ[ind] = pix0 + inimage[ind + nx * ny];
			}
			/* Last slice (no Z difference): every row except the last. */
#pragma omp for nowait
			for (ind = 0; ind < nx * (ny - 1); ind++)
			{
				pix0 = -inimage[ind + offset1];
				outimageX[ind + offset1] = pix0 + inimage[ind + offset1 + 1];
				outimageY[ind + offset1] = pix0 + inimage[ind + offset1 + nx];
			}
			/* Last row of the last slice: X difference only. */
#pragma omp for
			for (ind = 0; ind < nx - 1; ind++)
			{
				pix0 = -inimage[ind + offset2];
				outimageX[ind + offset2] = pix0 + inimage[ind + offset2 + 1];
			}
			//boundaries
			/* Periodic Y: last row differences wrap to row 0 of the slice. */
#pragma omp for nowait
			for (k = 0; k < nz; k++)
			{
				for (int i = 0; i < nx; i++)
				{
					int ind1 = (k * ny * nx);
					int ind2 = ind1 + (ny - 1) * nx;
					outimageY[ind2 + i] = -inimage[ind2 + i] + inimage[ind1 + i];
				}
			}
			/* Periodic X: last column wraps to column 0 of the row. */
#pragma omp for nowait
			for (k = 0; k < nz; k++)
			{
				for (int j = 0; j < ny; j++)
				{
					int ind1 = k * ny * nx + j * nx;
					int ind2 = ind1 + nx - 1;
					outimageX[ind2] = -inimage[ind2] + inimage[ind1];
				}
			}
			if (nz > 1)
			{
				/* Periodic Z: last slice wraps to slice 0. */
#pragma omp for nowait
				for (ind = 0; ind < ny * nx; ind++)
				{
					outimageZ[nx * ny * (nz - 1) + ind] = -inimage[nx * ny * (nz - 1) + ind] + inimage[ind];
				}
			}
		}
		inimage += volume;
		outimageX += volume;
		outimageY += volume;
		outimageZ += volume;
	}
	//now the rest of the channels
	/* Channel differences; the last channel wraps back to channel 0. */
	if (nc > 1)
	{
		long ind;
		for (c = 0; c < nc - 1; c++)
		{
			float *outimageC = outimageCfull + c * volume;
			const float *inimage = inimagefull + c * volume;
#pragma omp parallel for
			for (ind = 0; ind < volume; ind++)
			{
				outimageC[ind] = -inimage[ind] + inimage[ind + volume];
			}
		}
#pragma omp parallel for
		for (ind = 0; ind < volume; ind++)
		{
			outimageCfull[(nc - 1) * volume + ind] = -inimagefull[(nc - 1) * volume + ind] + inimagefull[ind];
		}
	}
	return 0;
}
/*
 * Adjoint (negative divergence) of fdiff_direct_neumann: accumulates
 * backward differences of the X/Y/Z gradient fields into outimagefull,
 * with the boundary rows/columns/slices corrected to match the Neumann
 * forward operator.  Per-axis results are staged in temp buffers, then
 * summed.  Returns 0.
 */
int fdiff_adjoint_neumann(float *outimagefull, const float *inimageXfull, const float *inimageYfull, const float *inimageZfull, const float *inimageCfull, long nx, long ny, long nz, long nc)
{
	//runs over full data in x, y, z. then corrects elements for boundary conditions and sums
	size_t volume = nx * ny * nz;
	//assumes nx and ny > 1
	int z_dim = nz - 1;  /* nonzero when the volume is truly 3D */
	float *outimage = outimagefull;
	const float *inimageX = inimageXfull;
	const float *inimageY = inimageYfull;
	const float *inimageZ = inimageZfull;
	/* Scratch buffers, reused for every channel. */
	float *tempX = (float *)malloc(volume * sizeof(float));
	float *tempY = (float *)malloc(volume * sizeof(float));
	float *tempZ;
	if (z_dim)
	{
		tempZ = (float *)malloc(volume * sizeof(float));
	}
	long c;
	for (c = 0; c < nc; c++) //just calculating x, y and z in each channel here
	{
#pragma omp parallel
		{
			long ind, k;
			/* Backward differences over the whole volume; the entries
			   at row/column starts are overwritten by the boundary
			   loops below. */
#pragma omp for
			for (ind = 1; ind < nx * ny * nz; ind++)
			{
				tempX[ind] = -inimageX[ind] + inimageX[ind - 1];
			}
#pragma omp for
			for (ind = nx; ind < nx * ny * nz; ind++)
			{
				tempY[ind] = -inimageY[ind] + inimageY[ind - nx];
			}
			//boundaries
			/* First/last column of each row per the Neumann adjoint. */
#pragma omp for
			for (k = 0; k < nz; k++)
			{
				for (int j = 0; j < ny; j++)
				{
					tempX[k * ny * nx + j * nx] = -inimageX[k * ny * nx + j * nx];
					tempX[k * ny * nx + j * nx + nx - 1] = inimageX[k * ny * nx + j * nx + nx - 2];
				}
			}
			/* First/last row of each slice. */
#pragma omp for
			for (k = 0; k < nz; k++)
			{
				for (int i = 0; i < nx; i++)
				{
					tempY[(k * ny * nx) + i] = -inimageY[(k * ny * nx) + i];
					tempY[(k * ny * nx) + nx * (ny - 1) + i] = inimageY[(k * ny * nx) + nx * (ny - 2) + i];
				}
			}
			if (z_dim)
			{
#pragma omp for
				for (ind = nx * ny; ind < nx * ny * nz; ind++)
				{
					tempZ[ind] = -inimageZ[ind] + inimageZ[ind - nx * ny];
				}
				/* First/last slice. */
#pragma omp for
				for (ind = 0; ind < ny * nx; ind++)
				{
					tempZ[ind] = -inimageZ[ind];
					tempZ[nx * ny * (nz - 1) + ind] = inimageZ[nx * ny * (nz - 2) + ind];
				}
				/* Sum the three per-axis contributions. */
#pragma omp for
				for (ind = 0; ind < volume; ind++)
				{
					outimage[ind] = tempX[ind] + tempY[ind] + tempZ[ind];
				}
			}
			else
			{
				/* 2D case: only X and Y contribute. */
#pragma omp for
				for (ind = 0; ind < volume; ind++)
				{
					outimage[ind] = tempX[ind] + tempY[ind];
				}
			}
		}
		outimage += volume;
		inimageX += volume;
		inimageY += volume;
		inimageZ += volume;
	}
	free(tempX);
	free(tempY);
	if (z_dim)
		free(tempZ);
	// //now the rest of the channels
	/* Channel-direction adjoint: interior channels 1..nc-2 here, the
	   first and last channels in the final loop. */
	if (nc > 1)
	{
		long ind;
		for (c = 1; c < nc - 1; c++)
		{
#pragma omp parallel for
			for (ind = 0; ind < volume; ind++)
			{
				outimagefull[ind + c * volume] += -inimageCfull[ind + c * volume] + inimageCfull[ind + (c - 1) * volume];
			}
		}
#pragma omp parallel for
		for (ind = 0; ind < volume; ind++)
		{
			outimagefull[ind] += -inimageCfull[ind];
			outimagefull[(nc - 1) * volume + ind] += inimageCfull[(nc - 2) * volume + ind];
		}
	}
	return 0;
}
/*
 * Adjoint of fdiff_direct_periodic: backward differences of the gradient
 * fields with wrap-around corrections at each axis' first row / column /
 * slice, staged per axis in temp buffers and summed.  Returns 0.
 */
int fdiff_adjoint_periodic(float *outimagefull, const float *inimageXfull, const float *inimageYfull, const float *inimageZfull, const float *inimageCfull, long nx, long ny, long nz, long nc)
{
	//runs over full data in x, y, z. then corrects elements for boundary conditions and sums
	size_t volume = nx * ny * nz;
	//assumes nx and ny > 1
	int z_dim = nz - 1;  /* nonzero when the volume is truly 3D */
	float *outimage = outimagefull;
	const float *inimageX = inimageXfull;
	const float *inimageY = inimageYfull;
	const float *inimageZ = inimageZfull;
	/* Scratch buffers, reused for every channel. */
	float *tempX = (float *)malloc(volume * sizeof(float));
	float *tempY = (float *)malloc(volume * sizeof(float));
	float *tempZ;
	if (z_dim)
	{
		tempZ = (float *)malloc(volume * sizeof(float));
	}
	long c;
	for (c = 0; c < nc; c++) //just calculating x, y and z in each channel here
	{
#pragma omp parallel
		{
			long ind, k;
			//run over all and then fix boundaries
#pragma omp for
			for (ind = 1; ind < volume; ind++)
			{
				tempX[ind] = -inimageX[ind] + inimageX[ind - 1];
			}
#pragma omp for
			for (ind = nx; ind < volume; ind++)
			{
				tempY[ind] = -inimageY[ind] + inimageY[ind - nx];
			}
			//boundaries
			/* Periodic: row 0 of each slice wraps to the last row. */
#pragma omp for
			for (k = 0; k < nz; k++)
			{
				for (int i = 0; i < nx; i++)
				{
					tempY[(k * ny * nx) + i] = -inimageY[(k * ny * nx) + i] + inimageY[(k * ny * nx) + nx * (ny - 1) + i];
				}
			}
			/* Periodic: column 0 of each row wraps to the last column. */
#pragma omp for
			for (k = 0; k < nz; k++)
			{
				for (int j = 0; j < ny; j++)
				{
					tempX[k * ny * nx + j * nx] = -inimageX[k * ny * nx + j * nx] + inimageX[k * ny * nx + j * nx + nx - 1];
				}
			}
			if (z_dim)
			{
#pragma omp for
				for (ind = nx * ny; ind < nx * ny * nz; ind++)
				{
					tempZ[ind] = -inimageZ[ind] + inimageZ[ind - nx * ny];
				}
				/* Periodic: slice 0 wraps to the last slice. */
#pragma omp for
				for (ind = 0; ind < ny * nx; ind++)
				{
					tempZ[ind] = -inimageZ[ind] + inimageZ[nx * ny * (nz - 1) + ind];
				}
				/* Sum the three per-axis contributions. */
#pragma omp for
				for (ind = 0; ind < volume; ind++)
				{
					outimage[ind] = tempX[ind] + tempY[ind] + tempZ[ind];
				}
			}
			else
			{
				/* 2D case: only X and Y contribute. */
#pragma omp for
				for (ind = 0; ind < volume; ind++)
				{
					outimage[ind] = tempX[ind] + tempY[ind];
				}
			}
		}
		outimage += volume;
		inimageX += volume;
		inimageY += volume;
		inimageZ += volume;
	}
	free(tempX);
	free(tempY);
	if (z_dim)
		free(tempZ);
	//now the rest of the channels
	/* Channel-direction adjoint with wrap-around for channel 0. */
	if (nc > 1)
	{
		long ind;
		for (c = 1; c < nc; c++)
		{
#pragma omp parallel for
			for (ind = 0; ind < volume; ind++)
			{
				outimagefull[ind + c * volume] += -inimageCfull[ind + c * volume] + inimageCfull[ind + (c - 1) * volume];
			}
		}
#pragma omp parallel for
		for (ind = 0; ind < volume; ind++)
		{
			outimagefull[ind] += -inimageCfull[ind] + inimageCfull[(nc - 1) * volume + ind];
		}
	}
	return 0;
}
/* Finite differences over a 4D (channel, z, y, x) volume.
 * boundary: nonzero = periodic, zero = Neumann.
 * direction: nonzero = forward (direct) operator, zero = adjoint.
 * Temporarily sets the OpenMP thread count to nThreads, restoring the
 * previous value before returning.  Returns 0. */
DLL_EXPORT int fdiff4D(float *imagefull, float *gradCfull, float *gradZfull, float *gradYfull, float *gradXfull, long nc, long nz, long ny, long nx, int boundary, int direction, int nThreads)
{
	int prev_threads;
	threads_setup(nThreads, &prev_threads);
	if (direction)
	{
		/* Forward operator. */
		if (boundary)
			fdiff_direct_periodic(imagefull, gradXfull, gradYfull, gradZfull, gradCfull, nx, ny, nz, nc);
		else
			fdiff_direct_neumann(imagefull, gradXfull, gradYfull, gradZfull, gradCfull, nx, ny, nz, nc);
	}
	else
	{
		/* Adjoint operator. */
		if (boundary)
			fdiff_adjoint_periodic(imagefull, gradXfull, gradYfull, gradZfull, gradCfull, nx, ny, nz, nc);
		else
			fdiff_adjoint_neumann(imagefull, gradXfull, gradYfull, gradZfull, gradCfull, nx, ny, nz, nc);
	}
	omp_set_num_threads(prev_threads);
	return 0;
}
/* Finite differences over a single-channel 3D volume (nc = 1, no channel
 * gradient).  boundary: nonzero = periodic, zero = Neumann; direction:
 * nonzero = forward, zero = adjoint.  Thread count is set to nThreads for
 * the call and restored afterwards.  Returns 0. */
DLL_EXPORT int fdiff3D(float *imagefull, float *gradZfull, float *gradYfull, float *gradXfull, long nz, long ny, long nx, int boundary, int direction, int nThreads)
{
	int prev_threads;
	threads_setup(nThreads, &prev_threads);
	if (direction)
	{
		/* Forward operator. */
		if (boundary)
			fdiff_direct_periodic(imagefull, gradXfull, gradYfull, gradZfull, NULL, nx, ny, nz, 1);
		else
			fdiff_direct_neumann(imagefull, gradXfull, gradYfull, gradZfull, NULL, nx, ny, nz, 1);
	}
	else
	{
		/* Adjoint operator. */
		if (boundary)
			fdiff_adjoint_periodic(imagefull, gradXfull, gradYfull, gradZfull, NULL, nx, ny, nz, 1);
		else
			fdiff_adjoint_neumann(imagefull, gradXfull, gradYfull, gradZfull, NULL, nx, ny, nz, 1);
	}
	omp_set_num_threads(prev_threads);
	return 0;
}
/* Finite differences over a single-channel 2D image (nz = nc = 1, no Z or
 * channel gradient).  boundary: nonzero = periodic, zero = Neumann;
 * direction: nonzero = forward, zero = adjoint.  Thread count is set to
 * nThreads for the call and restored afterwards.  Returns 0. */
DLL_EXPORT int fdiff2D(float *imagefull, float *gradYfull, float *gradXfull, long ny, long nx, int boundary, int direction, int nThreads)
{
	int prev_threads;
	threads_setup(nThreads, &prev_threads);
	if (direction)
	{
		/* Forward operator. */
		if (boundary)
			fdiff_direct_periodic(imagefull, gradXfull, gradYfull, NULL, NULL, nx, ny, 1, 1);
		else
			fdiff_direct_neumann(imagefull, gradXfull, gradYfull, NULL, NULL, nx, ny, 1, 1);
	}
	else
	{
		/* Adjoint operator. */
		if (boundary)
			fdiff_adjoint_periodic(imagefull, gradXfull, gradYfull, NULL, NULL, nx, ny, 1, 1);
		else
			fdiff_adjoint_neumann(imagefull, gradXfull, gradYfull, NULL, NULL, nx, ny, 1, 1);
	}
	omp_set_num_threads(prev_threads);
	return 0;
}
|
ctradd.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/ztradd.c, normal z -> c, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_tradd
*
* Performs an addition of two trapezoidal matrices similarly to the
* pctradd() function from the PBLAS library:
*
* \f[ B = \alpha * op( A ) + \beta * B, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
* \f[ op( X ) = X^H, \f]
*
* alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or
* n-by-m matrix depending on the value of transa and B an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the shape of op( A ) and B matrices:
* - PlasmaUpper: op( A ) and B are upper trapezoidal matrices.
* - PlasmaLower: op( A ) and B are lower trapezoidal matrices.
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^H
*
* @param[in] m
* Number of rows of the matrices op( A ) and B.
* m >= 0.
*
* @param[in] n
* Number of columns of the matrices op( A ) and B.
* n >= 0.
*
* @param[in] alpha
* Scalar factor of A.
*
* @param[in] A
* Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans
* and m otherwise.
*
* @param[in] lda
* Leading dimension of the array A. lda >= max(1,l), where l is m
* when transa = PlasmaNoTrans and n otherwise.
*
* @param[in] beta
* Scalar factor of B.
*
* @param[in,out] B
* Matrix of size ldb-by-n.
* On exit, B = alpha * op( A ) + beta * B
*
* @param[in] ldb
* Leading dimension of the array B.
* ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_ctradd
* @sa plasma_ctradd
* @sa plasma_dtradd
* @sa plasma_stradd
*
******************************************************************************/
int plasma_ctradd(plasma_enum_t uplo, plasma_enum_t transa,
                  int m, int n,
                  plasma_complex32_t alpha, plasma_complex32_t *pA, int lda,
                  plasma_complex32_t beta, plasma_complex32_t *pB, int ldb)
{
    // Get PLASMA context
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.  A negative return value -k flags the
    // k-th argument as illegal, LAPACK-style.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    if (pA == NULL) {
        plasma_error("NULL A");
        return -6;
    }

    // op( A ) is m-by-n, so A itself is n-by-m when transa transposes it.
    int am, an;
    if (transa == PlasmaNoTrans) {
        am = m;
        an = n;
    }
    else {
        am = n;
        an = m;
    }
    int bm = m;
    int bn = n;

    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (pB == NULL) {
        plasma_error("NULL B");
        return -9;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // quick return: nothing to do for empty dimensions or the identity
    // update (alpha == 0 and beta == 1).
    if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_tradd(plasma, PlasmaComplexFloat, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        bm, bn, 0, 0, bm, bn, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    // NOTE(review): retval from plasma_sequence_init()/plasma_request_init()
    // is assigned but never checked -- confirm these cannot fail.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block: the master thread submits layout translation and
    // the tile tradd as tasks; the parallel region's implicit barrier at
    // the closing brace provides the synchronization.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);

        // Call tile async function.
        plasma_omp_ctradd(uplo, transa,
                          alpha, A,
                          beta, B,
                          &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_tradd
*
* Performs an addition of two trapezoidal matrices similarly to the
* pctradd() function from the PBLAS library. Non-blocking tile version of
* plasma_ctradd(). May return before the computation is finished. Operates
* on matrices stored by tiles. All matrices are passed through descriptors.
* All dimensions are taken from the descriptors. Allows for pipelining of
* operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the shape of op( A ) and B matrices:
* - PlasmaUpper: op( A ) and B are upper trapezoidal matrices.
* - PlasmaLower: op( A ) and B are lower trapezoidal matrices.
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^H
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check the
* sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_ctradd
* @sa plasma_omp_ctradd
* @sa plasma_omp_dtradd
* @sa plasma_omp_stradd
*
******************************************************************************/
// Non-blocking tile version of plasma_ctradd(): B = alpha*op(A) + beta*B.
// Errors are reported by setting sequence->status / request->status via
// plasma_request_fail(); the function itself returns void.
void plasma_omp_ctradd(plasma_enum_t uplo, plasma_enum_t transa,
                       plasma_complex32_t alpha, plasma_desc_t A,
                       plasma_complex32_t beta, plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate sequence and request before any other check: every error
    // path below reports through plasma_request_fail(), which records the
    // failure in *sequence and *request.  The previous ordering checked
    // them last, so earlier argument errors dereferenced a possibly-NULL
    // sequence/request.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return: B is untouched when op(A) contributes nothing
    // (alpha == 0 or op(A) has zero rows) and beta == 1.
    int am = transa == PlasmaNoTrans ? A.m : A.n;
    if ((alpha == 0.0 || am == 0) && beta == 1.0)
        return;

    // Call parallel function.
    plasma_pctradd(uplo, transa,
                   alpha, A,
                   beta, B,
                   sequence, request);
}
|
ft.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - FT
This benchmark is an OpenMP C version of the NPB FT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: D. Bailey
W. Saphir
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include <stdint.h>
#include "npb-C.h"
/* global variables */
#include "global.h"
/* function declarations */
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
int t, int indexmap[NZ][NY][NX], int d[3]);
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]);
static void ipow46(double a, int exponent, double *result);
static void setup(void);
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]);
static void print_timers(void);
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]);
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void fft_init (int n);
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static int ilog2(int n);
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]);
static void verify (int d1, int d2, int d3, int nt,
boolean *verified, char *class);
/*--------------------------------------------------------------------
c FT benchmark
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c FT benchmark driver.  carg carries the requested OpenMP thread count
c (an integer smuggled through the thread-start void* argument).  The
c whole problem is run once untimed to touch all data, then re-run
c timed; the compute time is printed per client.  Returns 0.
c Fixes: the function is declared int but previously fell off the end
c without a return (UB if the caller uses the result); "Computetime"
c printed the unsigned arg with %d.
c-------------------------------------------------------------------*/
static int realmain(void *carg)
{
    unsigned arg = (uintptr_t)carg;
    int i;
    /*--------------------------------------------------------------------
    c u0 holds the (transformed) initial condition; u1 and u2 are working
    c arrays; indexmap maps (i,j,k) of u0 to i^2+j^2+k^2 for the time
    c evolution operator.  They are static so the large arrays live in the
    c data segment rather than on the stack; the pads between them avoid
    c accidental cache conflicts, since all array sizes are powers of two.
    c-------------------------------------------------------------------*/
    static dcomplex u0[NZ][NY][NX];
    static dcomplex pad1[3];
    static dcomplex u1[NZ][NY][NX];
    static dcomplex pad2[3];
    static dcomplex u2[NZ][NY][NX];
    static dcomplex pad3[3];
    static int indexmap[NZ][NY][NX];
    int iter;
    int nthreads = 1;
    double total_time, mflops;
    boolean verified;
    char class;

    omp_set_num_threads(arg);

    /*--------------------------------------------------------------------
    c Warm-up: run the entire problem once so all data is touched.  This
    c reduces variable startup costs, which matters for such a short
    c benchmark.  The other NPB 2 implementations are similar.
    c-------------------------------------------------------------------*/
    for (i = 0; i < T_MAX; i++) {
        timer_clear(i);
    }
    setup();
#pragma omp parallel
    {
        compute_indexmap(indexmap, dims[2]);
#pragma omp single
        {
            compute_initial_conditions(u1, dims[0]);
            fft_init (dims[0][0]);
        }
        fft(1, u1, u0);
    } /* end parallel */

    /*--------------------------------------------------------------------
    c Timed run: start over from the beginning.  Note that all operations
    c must be timed, in contrast to other benchmarks.
    c-------------------------------------------------------------------*/
    for (i = 0; i < T_MAX; i++) {
        timer_clear(i);
    }
    timer_start(T_TOTAL);
    if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP);
#pragma omp parallel private(iter) firstprivate(niter)
    {
        compute_indexmap(indexmap, dims[2]);
#pragma omp single
        {
            compute_initial_conditions(u1, dims[0]);
            fft_init (dims[0][0]);
        }
        if (TIMERS_ENABLED == TRUE) {
#pragma omp master
            timer_stop(T_SETUP);
        }
        if (TIMERS_ENABLED == TRUE) {
#pragma omp master
            timer_start(T_FFT);
        }
        fft(1, u1, u0);
        if (TIMERS_ENABLED == TRUE) {
#pragma omp master
            timer_stop(T_FFT);
        }
        /* Main loop: evolve in Fourier space, inverse FFT, checksum. */
        for (iter = 1; iter <= niter; iter++) {
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_start(T_EVOLVE);
            }
            evolve(u0, u1, iter, indexmap, dims[0]);
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_stop(T_EVOLVE);
            }
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_start(T_FFT);
            }
            fft(-1, u1, u2);
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_stop(T_FFT);
            }
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_start(T_CHECKSUM);
            }
            checksum(iter, u2, dims[0]);
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_stop(T_CHECKSUM);
            }
        }
#pragma omp single
        verify(NX, NY, NZ, niter, &verified, &class);
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    } /* end parallel */
    timer_stop(T_TOTAL);
    total_time = timer_read(T_TOTAL);

    if( total_time != 0.0) {
        mflops = 1.0e-6*(double)(NTOTAL) *
            (14.8157+7.19641*log((double)(NTOTAL))
             + (5.23518+7.21113*log((double)(NTOTAL)))*niter)
            /total_time;
    } else {
        mflops = 0.0;
    }
#ifdef BOMP
    backend_create_time(arg);
#endif
    /* %u matches the unsigned type of arg (was %d). */
    printf("Computetime %u %f\n", arg, total_time);
    printf("client done\n");
    /* c_print_results("FT", class, NX, NY, NZ, niter, nthreads, */
    /* total_time, mflops, " floating point", verified, */
    /* NPBVERSION, COMPILETIME, */
    /* CS1, CS2, CS3, CS4, CS5, CS6, CS7); */
    if (TIMERS_ENABLED == TRUE) print_timers();
    return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
                   int t, int indexmap[NZ][NY][NX], int d[3]) {
    /* Advance u0 by t time steps in Fourier space, writing into u1: each
       mode is scaled by ex[t * indexmap[...]].  Called from inside an
       enclosing parallel region; the outer z loop is work-shared. */
    int iz, iy, ix;
#pragma omp for
    for (iz = 0; iz < d[2]; iz++) {
        for (iy = 0; iy < d[1]; iy++) {
            for (ix = 0; ix < d[0]; ix++) {
                crmul(u1[iz][iy][ix], u0[iz][iy][ix],
                      ex[t*indexmap[iz][iy][ix]]);
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) {
    /* Fill u0 with the initial condition: pseudorandom (real, imag) pairs
       drawn plane by plane from the NPB linear-congruential generator.
       Called under "#pragma omp single", so the static tmp buffer and the
       generator state are only touched by one thread.
       NOTE(review): the d argument is unused; bounds come from dims[0]. */
    int k;
    double x0, start, an, dummy;
    /* Scratch for one z-plane of 2*NX*NY random values. */
    static double tmp[NX*2*MAXDIM+1];
    int i,j,t;
    start = SEED;
    /*--------------------------------------------------------------------
    c Jump to the starting element for our first plane:
    c an = A^offset mod 2^46, applied to the seed by one randlc() call.
    c-------------------------------------------------------------------*/
    ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an);
    dummy = randlc(&start, an);
    /* an now advances the seed by one full z-plane (2*NX*NY values). */
    ipow46(A, 2*NX*NY, &an);
    /*--------------------------------------------------------------------
    c Go through by z planes filling in one square at a time.
    c-------------------------------------------------------------------*/
    for (k = 0; k < dims[0][2]; k++) {
        x0 = start;
        vranlc(2*NX*dims[0][1], &x0, A, tmp);
        t = 1;  /* results are consumed from tmp[1] onward */
        for (j = 0; j < dims[0][1]; j++)
            for (i = 0; i < NX; i++) {
                u0[k][j][i].real = tmp[t++];
                u0[k][j][i].imag = tmp[t++];
            }
        /* Advance the seed to the next plane.  NOTE(review): k < dims[0][2]
           inside this loop, so the guard is always true -- presumably meant
           to skip the advance after the last plane; harmless as written. */
        if (k != dims[0][2]) dummy = randlc(&start, an);
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void ipow46(double a, int exponent, double *result) {
    /* Compute a^exponent mod 2^46 by binary exponentiation, storing the
       value in *result.  randlc(&p, q) implements p = p*q mod 2^46, so:
         a^n = (a^(n/2))^2      if n even,
         a^n = a * a^(n-1)      if n odd. */
    double scrap, base, acc;
    int e;

    *result = 1;
    if (exponent == 0) return;

    base = a;   /* running square of a */
    acc = 1;    /* accumulated product */
    for (e = exponent; e > 1; ) {
        if (e % 2 == 0) {
            scrap = randlc(&base, base);   /* square the base */
            e /= 2;
        } else {
            scrap = randlc(&acc, base);    /* fold base into the product */
            e -= 1;
        }
    }
    scrap = randlc(&acc, base);            /* final multiply */
    *result = acc;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void setup(void) {
    /* Print the benchmark banner and initialize global problem state:
       iteration count, per-layout dimensions in dims[], the (trivial,
       single-process) start/end index ranges, and the FFT blocking
       factors.  Unused locals (ierr, j, fstatus) removed. */
    int i;

    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
           " - FT Benchmark\n\n");
    niter = NITER_DEFAULT;
    printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ);
    printf(" Iterations : %7d\n", niter);

    /* All three layouts use the same (undecomposed) dimensions. */
    for (i = 0; i < 3; i++) {
        dims[i][0] = NX;
        dims[i][1] = NY;
        dims[i][2] = NZ;
    }
    /* Single process: each dimension owns the full 1..N index range. */
    for (i = 0; i < 3; i++) {
        xstart[i] = 1;
        xend[i] = NX;
        ystart[i] = 1;
        yend[i] = NY;
        zstart[i] = 1;
        zend[i] = NZ;
    }
    /*--------------------------------------------------------------------
    c Blocking of ffts and transposes for cache friendliness: work is done
    c a chunk (fftblock columns) at a time along the first, second, or
    c third dimension.  fftblockpad pads the scratch arrays; the guard
    c below only matters if a non-default block size is set above it.
    c-------------------------------------------------------------------*/
    fftblock = FFTBLOCK_DEFAULT;
    fftblockpad = FFTBLOCKPAD_DEFAULT;
    if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) {
    /* Build indexmap[k][j][i] = ibar^2 + jbar^2 + kbar^2 for the time
       evolution exponent, then (one thread) precompute the table of
       exponentials ex[] used by evolve().  Runs inside an enclosing
       parallel region; the i loop is work-shared.
       NOTE(review): the d argument is unused; bounds come from dims[2]. */
    int i, j, k, ii, ii2, jj, ij2, kk;
    double ap;
    /*--------------------------------------------------------------------
    c Convert the fortran indices 1 2 3 4 5 6 7 8
    c to                          0 1 2 3 -4 -3 -2 -1
    c via the formula mod(i-1+n/2, n) - n/2 (signed frequencies).
    c-------------------------------------------------------------------*/
#pragma omp for
    for (i = 0; i < dims[2][0]; i++) {
        ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;
        ii2 = ii*ii;
        for (j = 0; j < dims[2][1]; j++) {
            jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;
            ij2 = jj*jj+ii2;
            for (k = 0; k < dims[2][2]; k++) {
                kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;
                indexmap[k][j][i] = kk*kk+ij2;
            }
        }
    }
    /*--------------------------------------------------------------------
    c Compute the array of exponentials for time evolution:
    c ex[t] = exp(ap)^t.  ex is shared, so a single thread fills it; the
    c implicit barrier at the end of single orders it before use.
    c-------------------------------------------------------------------*/
#pragma omp single
    {
        ap = - 4.0 * ALPHA * PI * PI;
        ex[0] = 1.0;
        ex[1] = exp(ap);
        for (i = 2; i <= EXPMAX; i++) {
            ex[i] = ex[i-1]*ex[1];
        }
    } /* end single */
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void print_timers(void) {
    /* Print every timer with a nonzero accumulated time.  The label table
       is indexed by the T_* timer ids (T_MAX entries). */
    int i;
    static const char *tstrings[] = { " total ",
                                      " setup ",
                                      " fft ",
                                      " evolve ",
                                      " checksum ",
                                      " fftlow ",
                                      " fftcopy " };
    for (i = 0; i < T_MAX; i++) {
        double t = timer_read(i);  /* read once instead of twice */
        if (t != 0.0) {
            /* Fixed typo in the format: "(%16s(" -> "(%16s)". */
            printf("timer %2d(%16s) :%10.6f\n", i, tstrings[i], t);
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) {
    /* Full 3-D FFT: dir == 1 is the forward transform, anything else the
       inverse.  x1 and x2 must be distinct arrays; the first two passes
       run in place on x1 and the last pass writes x2.  The scratch blocks
       are passed down to the per-dimension routines. */
    dcomplex scratch0[NX][FFTBLOCKPAD];
    dcomplex scratch1[NX][FFTBLOCKPAD];

    if (dir == 1) {
        cffts1(1, dims[0], x1, x1, scratch0, scratch1);  /* x1 -> x1 */
        cffts2(1, dims[1], x1, x1, scratch0, scratch1);  /* x1 -> x1 */
        cffts3(1, dims[2], x1, x2, scratch0, scratch1);  /* x1 -> x2 */
    } else {
        cffts3(-1, dims[2], x1, x1, scratch0, scratch1); /* x1 -> x1 */
        cffts2(-1, dims[1], x1, x1, scratch0, scratch1); /* x1 -> x1 */
        cffts1(-1, dims[0], x1, x2, scratch0, scratch1); /* x1 -> x2 */
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
                   dcomplex xout[NZ][NY][NX],
                   dcomplex y0[NX][FFTBLOCKPAD],
                   dcomplex y1[NX][FFTBLOCKPAD]) {
    /* FFT along the first (x) dimension.  Work-shared over z planes; rows
       are processed fftblock at a time through the y0 scratch block, which
       is filled transposed so the transform gets stride-one access. */
    int logd[3];
    int a, row, plane, rowbase;

    for (a = 0; a < 3; a++) {
        logd[a] = ilog2(d[a]);
    }

#pragma omp for
    for (plane = 0; plane < d[2]; plane++) {
        for (rowbase = 0; rowbase <= d[1] - fftblock; rowbase += fftblock) {
            /* Gather an fftblock-wide slab into the scratch, transposed. */
            for (row = 0; row < fftblock; row++) {
                for (a = 0; a < d[0]; a++) {
                    y0[a][row].real = x[plane][row+rowbase][a].real;
                    y0[a][row].imag = x[plane][row+rowbase][a].imag;
                }
            }
            cfftz(is, logd[0], d[0], y0, y1);
            /* Scatter the transformed slab back. */
            for (row = 0; row < fftblock; row++) {
                for (a = 0; a < d[0]; a++) {
                    xout[plane][row+rowbase][a].real = y0[a][row].real;
                    xout[plane][row+rowbase][a].imag = y0[a][row].imag;
                }
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
                   dcomplex xout[NZ][NY][NX],
                   dcomplex y0[NX][FFTBLOCKPAD],
                   dcomplex y1[NX][FFTBLOCKPAD]) {
    /* FFT along the second (y) dimension.  Work-shared over z planes;
       columns are processed fftblock at a time through the y0 scratch. */
    int logd[3];
    int a, col, plane, colbase;

    for (a = 0; a < 3; a++) {
        logd[a] = ilog2(d[a]);
    }

#pragma omp for
    for (plane = 0; plane < d[2]; plane++) {
        for (colbase = 0; colbase <= d[0] - fftblock; colbase += fftblock) {
            /* Gather an fftblock-wide strip of columns into the scratch. */
            for (col = 0; col < d[1]; col++) {
                for (a = 0; a < fftblock; a++) {
                    y0[col][a].real = x[plane][col][a+colbase].real;
                    y0[col][a].imag = x[plane][col][a+colbase].imag;
                }
            }
            cfftz(is, logd[1], d[1], y0, y1);
            /* Scatter the transformed strip back. */
            for (col = 0; col < d[1]; col++) {
                for (a = 0; a < fftblock; a++) {
                    xout[plane][col][a+colbase].real = y0[col][a].real;
                    xout[plane][col][a+colbase].imag = y0[col][a].imag;
                }
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
                   dcomplex xout[NZ][NY][NX],
                   dcomplex y0[NX][FFTBLOCKPAD],
                   dcomplex y1[NX][FFTBLOCKPAD]) {
    /* FFT along the third (z) dimension.  Work-shared over y rows;
       columns are processed fftblock at a time through the y0 scratch. */
    int logd[3];
    int a, depth, row, colbase;

    for (a = 0; a < 3; a++) {
        logd[a] = ilog2(d[a]);
    }

#pragma omp for
    for (row = 0; row < d[1]; row++) {
        for (colbase = 0; colbase <= d[0] - fftblock; colbase += fftblock) {
            /* Gather an fftblock-wide column strip across all z levels. */
            for (depth = 0; depth < d[2]; depth++) {
                for (a = 0; a < fftblock; a++) {
                    y0[depth][a].real = x[depth][row][a+colbase].real;
                    y0[depth][a].imag = x[depth][row][a+colbase].imag;
                }
            }
            cfftz(is, logd[2], d[2], y0, y1);
            /* Scatter the transformed strip back. */
            for (depth = 0; depth < d[2]; depth++) {
                for (a = 0; a < fftblock; a++) {
                    xout[depth][row][a+colbase].real = y0[depth][a].real;
                    xout[depth][row][a+colbase].imag = y0[depth][a].imag;
                }
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fft_init (int n) {
    /* Precompute the roots-of-unity table u[] for all subsequent FFTs of
       length up to n, laid out level by level so each FFT stage gets
       stride-one access.  u[0].real stores log2(n) as a sanity tag that
       cfftz() checks. */
    int levels, lvl, idx, span, base;
    double step, angle;

    levels = ilog2(n);
    u[0].real = (double)levels;
    u[0].imag = 0.0;

    base = 1;   /* first slot of the current level's factors */
    span = 1;   /* number of factors on the current level */
    for (lvl = 1; lvl <= levels; lvl++) {
        step = PI / span;
        for (idx = 0; idx < span; idx++) {
            angle = idx * step;
            u[base + idx].real = cos(angle);
            u[base + idx].imag = sin(angle);
        }
        base += span;
        span *= 2;
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
                   dcomplex y[NX][FFTBLOCKPAD]) {
    /* Compute fftblock simultaneous n-point complex FFTs (n = 2^m) over
       the columns of x, using y as scratch; the result ends up back in x.
       is = +1 forward, -1 inverse.  u[] must have been initialized by
       fft_init() first; u[0].real holds the maximum supported log2 size. */
    int i,j,l,mx;
    /*--------------------------------------------------------------------
    c Check if input parameters are invalid (direction, or m outside the
    c range the u[] table was built for).
    c-------------------------------------------------------------------*/
    mx = (int)(u[0].real);
    if ((is != 1 && is != -1) || m < 1 || m > mx) {
        printf("CFFTZ: Either U has not been initialized, or else\n"
               "one of the input parameters is invalid%5d%5d%5d\n",
               is, m, mx);
        exit(1);
    }
    /*--------------------------------------------------------------------
    c Perform one variant of the Stockham FFT: each fftz2 pass ping-pongs
    c the data between x and y, two passes (x->y then y->x) per iteration.
    c-------------------------------------------------------------------*/
    for (l = 1; l <= m; l+=2) {
        fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y);
        if (l == m) break;   /* odd m: the final pass left the result in y */
        fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x);
    }
    /*--------------------------------------------------------------------
    c Copy Y to X: for odd m the ping-pong ended with the data in y.
    c-------------------------------------------------------------------*/
    if (m % 2 == 1) {
        for (j = 0; j < n; j++) {
            for (i = 0; i < fftblock; i++) {
                x[j][i].real = y[j][i].real;
                x[j][i].imag = y[j][i].imag;
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
                   dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
                   dcomplex y[NX][FFTBLOCKPAD]) {
    /* Perform the l-th iteration (of m) of the second variant of the
       Stockham FFT: combine block pairs of x into y with twiddle factors
       from u.  n is the transform length; ny is the number of simultaneous
       transforms (columns touched per row).
       NOTE(review): ny1 (the padded column count) is unused here. */
    int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22;
    /* NOTE(review): x11 and x21 are declared but never used; the butterfly
       below works on the scalar x11real/x11imag/x21real/x21imag locals. */
    dcomplex u1,x11,x21;
    /*--------------------------------------------------------------------
    c Set initial parameters: lk = 2^(l-1) (current block length),
    c li = 2^(m-l) (number of block pairs), lj = 2*lk (output stride).
    c-------------------------------------------------------------------*/
    n1 = n / 2;
    if (l-1 == 0) {
        lk = 1;
    } else {
        lk = 2 << ((l - 1)-1);
    }
    if (m-l == 0) {
        li = 1;
    } else {
        li = 2 << ((m - l)-1);
    }
    lj = 2 * lk;
    ku = li;   /* twiddle factors for this level start at u[li] */
    for (i = 0; i < li; i++) {
        /* Input block offsets (i11, i12) and output block offsets
           (i21, i22) for this pair. */
        i11 = i * lk;
        i12 = i11 + n1;
        i21 = i * lj;
        i22 = i21 + lk;
        if (is >= 1) {
            u1.real = u[ku+i].real;
            u1.imag = u[ku+i].imag;
        } else {
            /* Inverse transform: conjugate the twiddle factor. */
            u1.real = u[ku+i].real;
            u1.imag = -u[ku+i].imag;
        }
        /*--------------------------------------------------------------------
        c Butterfly (vectorizable):  y[i21] = x[i11] + x[i12],
        c                            y[i22] = u1 * (x[i11] - x[i12]).
        c-------------------------------------------------------------------*/
        for (k = 0; k < lk; k++) {
            for (j = 0; j < ny; j++) {
                double x11real, x11imag;
                double x21real, x21imag;
                x11real = x[i11+k][j].real;
                x11imag = x[i11+k][j].imag;
                x21real = x[i12+k][j].real;
                x21imag = x[i12+k][j].imag;
                y[i21+k][j].real = x11real + x21real;
                y[i21+k][j].imag = x11imag + x21imag;
                y[i22+k][j].real = u1.real * (x11real - x21real)
                                 - u1.imag * (x11imag - x21imag);
                y[i22+k][j].imag = u1.real * (x11imag - x21imag)
                                 + u1.imag * (x11real - x21real);
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static int ilog2(int n) {
/*--------------------------------------------------------------------
c Returns the ceiling of log2(n) for n >= 1 (0 when n == 1).
c-------------------------------------------------------------------*/
  int power, exponent;

  if (n == 1) {
    return 0;
  }
  /* Walk powers of two upward until one reaches or exceeds n. */
  exponent = 1;
  for (power = 2; power < n; power <<= 1) {
    exponent++;
  }
  return exponent;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) {
/*--------------------------------------------------------------------
c Accumulates the benchmark checksum for iteration i: sums 1024
c sampled entries of u1 that fall inside the local subgrid bounds
c (xstart/xend, ystart/yend, zstart/zend), folds each thread's
c partial sum into the shared sums[i], then normalizes by NTOTAL.
c
c NOTE(review): this routine uses orphaned OpenMP worksharing
c directives ("omp for", "omp critical", "omp barrier", "omp single"),
c so it must be called from inside an enclosing parallel region —
c TODO confirm against callers.  Parameter d and locals ierr/allchk
c are unused here.
c-------------------------------------------------------------------*/
int j, q,r,s, ierr;
dcomplex chk,allchk;
/* chk is this thread's partial checksum */
chk.real = 0.0;
chk.imag = 0.0;
/* Threads split the 1024 sample points; nowait lets each proceed
   straight to the critical section with its partial sum. */
#pragma omp for nowait
for (j = 1; j <= 1024; j++) {
q = j%NX+1;
if (q >= xstart[0] && q <= xend[0]) {
r = (3*j)%NY+1;
if (r >= ystart[0] && r <= yend[0]) {
s = (5*j)%NZ+1;
if (s >= zstart[0] && s <= zend[0]) {
/* (q,r,s) is inside the local subgrid: add u1 at local coords */
cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]);
}
}
}
}
/* Combine per-thread partials into the shared total. */
#pragma omp critical
{
sums[i].real += chk.real;
sums[i].imag += chk.imag;
}
/* All partials must be in before a single thread normalizes. */
#pragma omp barrier
#pragma omp single
{
/* complex % real */
sums[i].real = sums[i].real/(double)(NTOTAL);
sums[i].imag = sums[i].imag/(double)(NTOTAL);
/* printf("T = %5d Checksum = %22.12e %22.12e\n", */
/* i, sums[i].real, sums[i].imag); */
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void verify_class (int nt, const double vdata_real[],
                          const double vdata_imag[], boolean *verified) {
/*--------------------------------------------------------------------
c Helper for verify(): compares the computed checksums sums[1..nt]
c against one class's reference values and clears *verified on the
c first entry whose relative error exceeds the tolerance.
c-------------------------------------------------------------------*/
  const double epsilon = 1.0e-12;   /* relative-error tolerance */
  double err;
  int i;
  for (i = 1; i <= nt; i++) {
    err = (get_real(sums[i]) - vdata_real[i]) / vdata_real[i];
    if (fabs(err) > epsilon) {
      *verified = FALSE;
      return;
    }
    err = (get_imag(sums[i]) - vdata_imag[i]) / vdata_imag[i];
    if (fabs(err) > epsilon) {
      *verified = FALSE;
      return;
    }
  }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void verify (int d1, int d2, int d3, int nt,
                    boolean *verified, char *class) {
/*--------------------------------------------------------------------
c Determines the benchmark class (S/W/A/B/C) from the grid dimensions
c and the iteration count, verifies the computed checksums against
c that class's reference data, and prints the outcome.
c
c   d1,d2,d3  grid dimensions
c   nt        number of iterations (checksums) to verify
c   verified  out: TRUE iff every checksum is within tolerance
c   class     out: problem class, or 'U' if the size is unrecognized
c
c Fix: the success message is printed only when the checksums actually
c verified; previously any recognized class printed "successful" even
c after a checksum mismatch.  Also removed unused locals (ierr, size)
c and factored the five identical comparison loops into verify_class.
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Sample size reference checksums
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Class S size reference checksums
c-------------------------------------------------------------------*/
  static const double vdata_real_s[6+1] = { 0.0,
                                            5.546087004964e+02,
                                            5.546385409189e+02,
                                            5.546148406171e+02,
                                            5.545423607415e+02,
                                            5.544255039624e+02,
                                            5.542683411902e+02 };
  static const double vdata_imag_s[6+1] = { 0.0,
                                            4.845363331978e+02,
                                            4.865304269511e+02,
                                            4.883910722336e+02,
                                            4.901273169046e+02,
                                            4.917475857993e+02,
                                            4.932597244941e+02 };
/*--------------------------------------------------------------------
c Class W size reference checksums
c-------------------------------------------------------------------*/
  static const double vdata_real_w[6+1] = { 0.0,
                                            5.673612178944e+02,
                                            5.631436885271e+02,
                                            5.594024089970e+02,
                                            5.560698047020e+02,
                                            5.530898991250e+02,
                                            5.504159734538e+02 };
  static const double vdata_imag_w[6+1] = { 0.0,
                                            5.293246849175e+02,
                                            5.282149986629e+02,
                                            5.270996558037e+02,
                                            5.260027904925e+02,
                                            5.249400845633e+02,
                                            5.239212247086e+02 };
/*--------------------------------------------------------------------
c Class A size reference checksums
c-------------------------------------------------------------------*/
  static const double vdata_real_a[6+1] = { 0.0,
                                            5.046735008193e+02,
                                            5.059412319734e+02,
                                            5.069376896287e+02,
                                            5.077892868474e+02,
                                            5.085233095391e+02,
                                            5.091487099959e+02 };
  static const double vdata_imag_a[6+1] = { 0.0,
                                            5.114047905510e+02,
                                            5.098809666433e+02,
                                            5.098144042213e+02,
                                            5.101336130759e+02,
                                            5.104914655194e+02,
                                            5.107917842803e+02 };
/*--------------------------------------------------------------------
c Class B size reference checksums
c-------------------------------------------------------------------*/
  static const double vdata_real_b[20+1] = { 0.0,
                                             5.177643571579e+02,
                                             5.154521291263e+02,
                                             5.146409228649e+02,
                                             5.142378756213e+02,
                                             5.139626667737e+02,
                                             5.137423460082e+02,
                                             5.135547056878e+02,
                                             5.133910925466e+02,
                                             5.132470705390e+02,
                                             5.131197729984e+02,
                                             5.130070319283e+02,
                                             5.129070537032e+02,
                                             5.128182883502e+02,
                                             5.127393733383e+02,
                                             5.126691062020e+02,
                                             5.126064276004e+02,
                                             5.125504076570e+02,
                                             5.125002331720e+02,
                                             5.124551951846e+02,
                                             5.124146770029e+02 };
  static const double vdata_imag_b[20+1] = { 0.0,
                                             5.077803458597e+02,
                                             5.088249431599e+02,
                                             5.096208912659e+02,
                                             5.101023387619e+02,
                                             5.103976610617e+02,
                                             5.105948019802e+02,
                                             5.107404165783e+02,
                                             5.108576573661e+02,
                                             5.109577278523e+02,
                                             5.110460304483e+02,
                                             5.111252433800e+02,
                                             5.111968077718e+02,
                                             5.112616233064e+02,
                                             5.113203605551e+02,
                                             5.113735928093e+02,
                                             5.114218460548e+02,
                                             5.114656139760e+02,
                                             5.115053595966e+02,
                                             5.115415130407e+02,
                                             5.115744692211e+02 };
/*--------------------------------------------------------------------
c Class C size reference checksums
c-------------------------------------------------------------------*/
  static const double vdata_real_c[20+1] = { 0.0,
                                             5.195078707457e+02,
                                             5.155422171134e+02,
                                             5.144678022222e+02,
                                             5.140150594328e+02,
                                             5.137550426810e+02,
                                             5.135811056728e+02,
                                             5.134569343165e+02,
                                             5.133651975661e+02,
                                             5.132955192805e+02,
                                             5.132410471738e+02,
                                             5.131971141679e+02,
                                             5.131605205716e+02,
                                             5.131290734194e+02,
                                             5.131012720314e+02,
                                             5.130760908195e+02,
                                             5.130528295923e+02,
                                             5.130310107773e+02,
                                             5.130103090133e+02,
                                             5.129905029333e+02,
                                             5.129714421109e+02 };
  static const double vdata_imag_c[20+1] = { 0.0,
                                             5.149019699238e+02,
                                             5.127578201997e+02,
                                             5.122251847514e+02,
                                             5.121090289018e+02,
                                             5.121143685824e+02,
                                             5.121496764568e+02,
                                             5.121870921893e+02,
                                             5.122193250322e+02,
                                             5.122454735794e+02,
                                             5.122663649603e+02,
                                             5.122830879827e+02,
                                             5.122965869718e+02,
                                             5.123075927445e+02,
                                             5.123166486553e+02,
                                             5.123241541685e+02,
                                             5.123304037599e+02,
                                             5.123356167976e+02,
                                             5.123399592211e+02,
                                             5.123435588985e+02,
                                             5.123465164008e+02 };
  *verified = TRUE;
  *class = 'U';
  if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) {
    *class = 'S';
    verify_class(nt, vdata_real_s, vdata_imag_s, verified);
  } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) {
    *class = 'W';
    verify_class(nt, vdata_real_w, vdata_imag_w, verified);
  } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) {
    *class = 'A';
    verify_class(nt, vdata_real_a, vdata_imag_a, verified);
  } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) {
    *class = 'B';
    verify_class(nt, vdata_real_b, vdata_imag_b, verified);
  } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) {
    *class = 'C';
    verify_class(nt, vdata_real_c, vdata_imag_c, verified);
  }
  /* Success requires both a recognized class AND matching checksums. */
  if (*class != 'U' && *verified) {
    printf("Result verification successful\n");
  } else {
    printf("Result verification failed\n");
  }
  printf("class = %1c\n", *class);
}
/* Stack size (8 MiB) for the worker thread on the BOMP backend. */
#define STACK_SIZE (8 * 1024 * 1024)
/*--------------------------------------------------------------------
c Program entry: parses the requested thread count from argv[1] and
c runs realmain, either on a dedicated BOMP backend thread (with a
c custom stack) or directly in-process.
c-------------------------------------------------------------------*/
int main(int argc, char** argv)
{
    if (argc != 2) { /* Print usage to stderr and fail portably */
        fprintf(stderr, "Usage: %s <Number of threads>\n", argv[0]);
        exit(EXIT_FAILURE);
    }
#ifdef BOMP
    backend_span_domain(atoi(argv[1]), STACK_SIZE);
    bomp_custom_init();
    backend_thread_create_varstack(realmain, (void*)((uint64_t)atoi(argv[1])),
                                   STACK_SIZE);
    backend_thread_exit();
#else /* BOMP */
    realmain(atoi(argv[1]));
#endif /* BOMP */
    return 0;   /* explicit success status */
}
|
LAGraph_bfs_pushpull.c | //------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: push-pull breadth-first search
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
#include "LAGraph_bfs_pushpull.h"
#include "../config.h"
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search,
// contributed by Tim Davis, Texas A&M.
// LAGraph_bfs_pushpull computes the BFS of a graph from a single given
// source node. The result is a vector v where v(i)=k if node i was placed
// at level k in the BFS.
// Usage:
// info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ;
// GrB_Vector *v: a vector containing the result, created on output.
// v(i) = k is the BFS level of node i in the graph, where a source
// node has v(source)=1. v(i) is implicitly zero if it is unreachable
// from the source node. That is, GrB_Vector_nvals (&nreach,v) is the
// size of the reachable set of the source node, for a single-source
// BFS. v may be returned as sparse, or full. If full, v(i)=0
// indicates that node i was not reached. If sparse, the pattern of v
// indicates the set of nodes reached.
// GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing.
// pi(source) = source+1 for source node. pi(i) = p+1 if p is the
// parent of i. If pi is sparse, and pi(i) is not present, then node
// i has not been reached. Otherwise, if pi is full, then pi(i)=0
// indicates that node i was not reached.
// GrB_Matrix A: a square matrix of any type. The values of A are not
// accessed. The presence of the entry A(i,j) indicates the edge
// (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge.
// GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm
// is a conventional push-only BFS. If not NULL, AT must be the
// transpose of A, and a push-pull algorithm is used (NOTE: this
// assumes GraphBLAS stores its matrix in CSR form; see discussion
// below). Results are undefined if AT is not NULL but not identical
// to the transpose of A.
// int64_t source: the source node for the BFS.
// int64_t max_level: An optional limit on the levels searched for the
// single-source BFS. If zero, then no limit is enforced. If > 0,
// then only nodes with v(i) <= max_level will be visited. That is:
// 1: just the source node, 2: the source and its neighbors, 3: the
// source node, its neighbors, and their neighbors, etc.
// bool vsparse: if the result v may remain very sparse, then set this
// parameter to true. If v might have many entries, set it false. If
// you are unsure, then set it to true. This parameter speeds up
// the handling of v. If you guess wrong, there is a slight
// performance penalty. The results are not affected by this
// parameter, just the performance. This parameter is used only for
// the single-source BFS.
// single-source BFS:
// Given a graph A, a source node, find all nodes reachable from the
// source node. v(source)=1, v(i)=2 if edge (source,i) appears in the
// graph, and so on. If node i is not reachable from source, then
// implicitly v(i)=0. v is returned as a sparse vector, and v(i) is not
// an entry in this vector.
// This algorithm can use the push-pull strategy, which requires both A and
// AT=A' to be passed in. If the graph is known to be symmetric, then the same
// matrix A can be passed in for both arguments. Results are undefined if AT
// is not the transpose of A.
// If only A or AT is passed in, then only single strategy will be used: push
// or pull, but not both. In general, push-only performs well. A pull-only
// strategy is possible but it is exceedingly slow. Assuming A and AT are both
// in CSR format, then (let s = source node):
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!)
// If A and AT are both in CSC format, then:
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!)
// Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS
// detects this case and refuses to do it.
// The basic step of this algorithm computes A'*q where q is the 'queue' of
// nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' =
// A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing,
// just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is
// simultaneously a row and column vector, so q and q' are interchangeable.
// To implement an efficient BFS using GraphBLAS, an assumption must be made in
// LAGraph about how the matrix is stored, whether by row or by column (or
// perhaps some other opaque data structure). The storage format has a huge
// impact on the relative performance of vxm(q,A) and mxv(AT,q).
// Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily
// accessible. In terms of the graph A, this means that the out-adjacency
// list of node i can be traversed in time O(out-degree of node i).
// If AT is stored by row, then AT(i,:) is the in-adjacency list of node i,
// and traversing row i of AT can be done in O(in-degree of node i) time.
// The CSR (Compressed Sparse Row) format is the default for
// SuiteSparse:GraphBLAS, but no assumption can be made about any particular
// GraphBLAS library implementation.
// If A and AT are both stored by column instead, then A(i,:) is not easy to
// access. Instead, A(:,i) is the easily-accessible in-adjacency of node i,
// and AT(:,i) is the out-adjacency.
// A push step requires the out-adjacencies of each node, where as
// a pull step requires the in-adjacencies of each node.
// vxm(q,A) = A'*q, with A stored by row: a push step
// mxv(AT,q) = A'*q, with AT stored by row: a pull step
// vxm(q,A) = A'*q, with A stored by col: a pull step
// mxv(AT,q) = A'*q, with AT stored by col: a push step
// The GraphBLAS data structure is opaque. An implementation may decide to
// store the matrix A in both formats, internally, so that it easily traverse
// both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i)
// can both be easily traversed). This would make a push-pull BFS easy to
// implement using just the opaque GrB_Matrix A, but it doubles the storage.
// Deciding which format to use automatically is not a simple task,
// particularly since the decision must work well throughout GraphBLAS, not
// just for the BFS.
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column).
// As a result, the MATLAB expression x=AT*q is a push step, computed using a
// saxpy-based algorithm internally, and x=A'*q is a pull step, computed using
// a dot product.
// SuiteSparse:GraphBLAS can store a matrix in either format, but this requires
// an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)). where
// f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library
// could be augmented in the future with f = Gxb_BY_BOTH. It currently does
// not select the format automatically. As a result, if GxB_set is not used,
// all its GrB_Matrix objects are stored by row (CSR).
// SuiteSparse:GraphBLAS allows the user to query (via GxB_get) and set (via
// GxB_set) the format, whether by row or by column. The hypersparsity of
// A is selected automatically, with optional hints from the user application,
// but a selection between hypersparsity vs standard CSR and CSC has no effect
// on the push vs pull decision made here.
// The push/pull and saxpy/dot connection can be described as follows.
// Assume for these first two examples that MATLAB stores its matrices in CSR
// format, where accessing A(i,:) is fast.
// If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB
// notation as:
/*
function x = vxm (q,A)
% a push step: compute x = q'*A where q is a column vector
x = sparse (1,n)
for i = 1:n
% a saxpy operation, using the ith row of A and the scalar q(i)
x = x + q (i) * A (i,:)
end
*/
// If AT is stored by row, then x = mvx(AT,q) = AT*q = A'*q becomes
// a dot product:
/*
function x = mxv (AT,q)
% a pull step: compute x = AT*q where q is a column vector
for i = 1:n
% a dot-product of the ith row of AT and the column vector q
x (i) = AT (i,:) * q
end
*/
// The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and
// mxv(AT,q) by default, where A and AT are stored by row by default. However,
// they would be very slow in MATLAB, since it stores its sparse matrices in
// CSC format. In that case, if A is stored by column and thus accessing
// A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product
// instead. These two snippets assume the matrices are both in CSR for, and
// thus make more efficient use of MATLAB:
/*
function x = vxm (q,A)
% a pull step: compute x = q'*A where q is a column vector
for j = 1:n
% a dot product of the row vector q' and the jth column of A
x (j) = q' * A (:,j)
end
*/
// If AT is stored by column, then x = mvx(AT,q) is
/*
function x = mxv (AT,q)
% a push step: compute x = AT*q where q is a column vector
for j = 1:n
% a saxpy operation, using the jth column of AT and the scalar q(i)
x = x + AT (:,j) * q
end
*/
// In MATLAB, if q is a sparse column vector and A is a sparse matrix, then
// x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a
// dot product. You can view the code used internally in MATLAB for its sparse
// matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT
// packages, at http://suitesparse.com.
// This raises an interesting puzzle for LAGraph, which is intended on being a
// graph library that can be run on any implementation of GraphBLAS. There are
// no mechanisms in the GraphBLAS C API for LAGraph (or other external packages
// or user applications) to provide hints to GraphBLAS. Likely, there are no
// query mechanisms where LAGraph can ask GraphBLAS how its matrices might be
// stored (LAGraphs asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer
// from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it
// does not answer this query.
// There are two solutions to this puzzle. The most elegant one is for
// GraphBLAS to handle all this internally, and change formats as needed. It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS. This is not a simple task since the API is
// complex. Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).
// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.
// The second solution is to allow the user application or library such as
// LAGraph to provide hints and allow it to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.
// SuiteSparse:GraphBLAS takes the second approach: It adds two functions that
// are extensions to the API: GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format.  Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically. By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically). So if no GxB_* extensions are used, all matrices
// are in CSR format.
// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least A(i,:) and AT(i,:) can be easily accessed. With this assumption, it
// is the responsibility of this function to select between using a push or a
// pull, for each step in the BFS.
// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value. This cuts the time for the
// dot-product. Not all GraphBLAS libraries may use this, but SuiteSparse:
// GraphBLAS does (in version 2.3.0 and later). Early termination cannot be
// done for the saxpy (push step) method.
// The work done by the push method (saxpy) is very predictable. BFS uses a
// complemented mask. There is no simple way to exploit a complemented mask,
// and saxpy has no early termination rule. If the set of nodes in the current
// level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree,
// this becomes d*nq where nq = length (q):
// pushwork = d*nq
// The work done by the pull (dot product) method is less predictable. It can
// exploit the complemented mask, and so it only computes (n-nvisited) dot
// products, if nvisited is the # of nodes visited so far (in all levels).
// With no early-termination, the dot product will take d * log2 (nq) time,
// assuming that q is large and a binary search is used internally. That is,
// the dot product will scan through the d entries in A(i,:), and do a binary
// search for each entry in q. To account for the higher constant of a binary
// search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination,
// d is too high. If the nodes are randomly marked, the probability of each
// node being marked is nvisited/n. The expected number of trials until
// success, for a sequence of events with probability p, is 1/p.  Thus, the
// expected number of iterations in a dot product before an early termination
// is 1/p = (n/nvisited+1), where +1 is added to avoid a divide by zero.
// However, it cannot exceed d. Thus, the total work for the dot product
// (pull) method can be estimated as:
// per_dot = min (d, n / (nvisited+1))
// pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq)))
// The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later,
// and may be reasonable for other GraphBLAS implementations. Push or pull
// is selected as the one with the least work.
// TODO: change the formula for v3.2.0
// The push/pull decision requires that both A and AT be passed in, but this
// function can use just one or the other. If only A is passed in and AT is
// NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull
// step if A is CSC). If only AT is passed in and A is NULL, then only
// mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is
// CSC).
// In general, while a push-pull strategy is the fastest, a push-only BFS will
// give good performance.  In particular, the time to compute AT=A' plus the
// time for the push-pull BFS is typically higher than just a push-only BFS.
// This is why this function does not compute AT=A'.  To take advantage of the
// push-pull method, both A and AT must already be available, with the cost to
// construct them amortized across other computations such as this one.
// A pull-only strategy will be *exceedingly* slow.
// The input matrix A must be square. It can be non-binary, but best
// performance will be obtained if it is GrB_BOOL. It can have explicit
// entries equal to zero. These are safely ignored, and are treated as
// non-edges.
// SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs.
// In this case, if both matrices are provided, they must be in the same
// format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC
// format, vxm(q,A) is the pull step and mxv(AT,q) is the push step.
// If only A or AT are provided, and the result is a pull-only algorithm,
// an error is returned.
// References:
// Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull
// Efficiently in GraphBLAS. In Proceedings of the 47th International
// Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA,
// Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122
// Scott Beamer, Krste Asanovic and David A. Patterson,
// The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015.
// http://gap.cs.berkeley.edu/
/* Free every workspace vector (v, t, q, pi).  Used on both the error
   path (via LAGRAPH_ERROR) and the normal exit path of
   LAGraph_bfs_pushpull; relies on those identifiers being in scope. */
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (&v) ; \
GrB_free (&t) ; \
GrB_free (&q) ; \
GrB_free (&pi) ; \
}
/* Report the message and GrB_Info code with the source location, free
   all workspace, and return the code from the enclosing function. */
#define LAGRAPH_ERROR(message,info) \
{ \
fprintf (stderr, "LAGraph error: %s\n[%d]\nFile: %s Line: %d\n", \
message, info, __FILE__, __LINE__) ; \
LAGRAPH_FREE_ALL ; \
return (info) ; \
}
/* NOTE(review): both MAX and MIN evaluate their arguments more than
   once; do not pass expressions with side effects. */
#define LAGRAPH_MAX(x,y) (((x) > (y)) ? (x) : (y))
#define LAGRAPH_MIN(x,y) (((x) < (y)) ? (x) : (y))
GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL
(
GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph
GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i.
// if NULL, the parent is not computed.
GrB_Matrix A, // input graph, treated as if boolean in semiring
GrB_Matrix AT, // transpose of A (optional; push-only if NULL)
int64_t source, // starting node of the BFS
int64_t max_level, // optional limit of # levels to search
bool vsparse // if true, v is expected to be very sparse
) {
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Vector q = NULL ; // nodes visited at each level
GrB_Vector v = NULL ; // result vector
GrB_Vector t = NULL ; // temporary vector
GrB_Vector pi = NULL ; // parent vector
if(v_output == NULL || (A == NULL && AT == NULL)) {
// required output argument is missing
LAGRAPH_ERROR("required arguments are NULL", GrB_NULL_POINTER) ;
}
(*v_output) = NULL ;
bool compute_tree = (pi_output != NULL) ;
GrB_Descriptor desc_s = GrB_DESC_S ;
GrB_Descriptor desc_sc = GrB_DESC_SC ;
GrB_Descriptor desc_rc = GrB_DESC_RC ;
GrB_Descriptor desc_r = GrB_DESC_R ;
GrB_Index nrows, ncols, nvalA, ignore, nvals ;
// A is provided. AT may or may not be provided
GrB_Matrix_nrows(&nrows, A) ;
GrB_Matrix_ncols(&ncols, A) ;
GrB_Matrix_nvals(&nvalA, A) ;
bool use_vxm_with_A = true ;
// push/pull requires both A and AT
bool push_pull = (A != NULL && AT != NULL) ;
if(nrows != ncols) {
// A must be square
LAGRAPH_ERROR("A must be square", GrB_NULL_POINTER) ;
}
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
GrB_Index n = nrows ;
int nthreads = Config_GetOMPThreadCount();
nthreads = LAGRAPH_MIN(n / 4096, nthreads) ;
nthreads = LAGRAPH_MAX(nthreads, 1) ;
// just traverse from the source node
max_level = (max_level <= 0) ? n : LAGRAPH_MIN(n, max_level) ;
// create an empty vector v
GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ;
GrB_Vector_new(&v, int_type, n) ;
// make v dense if requested
int64_t vlimit = LAGRAPH_MAX(256, sqrt((double) n)) ;
if(!vsparse) {
// v is expected to have many entries, so convert v to dense.
// If the guess is wrong, v can be made dense later on.
GrB_assign(v, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
GrB_Semiring first_semiring, second_semiring ;
if(compute_tree) {
// create an integer vector q, and set q(source) to source+1
GrB_Vector_new(&q, int_type, n) ;
GrB_Vector_setElement(q, source + 1, source) ;
if(n > INT32_MAX) {
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT64 ;
second_semiring = GxB_ANY_SECOND_INT64 ;
} else {
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT32 ;
second_semiring = GxB_ANY_SECOND_INT32 ;
}
// create the empty parent vector
GrB_Vector_new(&pi, int_type, n) ;
if(!vsparse) {
// make pi a dense vector of all zeros
GrB_assign(pi, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
// pi (source) = source+1 denotes a root of the BFS tree
GrB_Vector_setElement(pi, source + 1, source) ;
} else {
// create a boolean vector q, and set q(source) to true
GrB_Vector_new(&q, GrB_BOOL, n) ;
GrB_Vector_setElement(q, true, source) ;
// terminates as soon as it finds any pair
first_semiring = GxB_ANY_PAIR_BOOL ;
second_semiring = GxB_ANY_PAIR_BOOL ;
}
// average node degree
double d = (n == 0) ? 0 : (((double) nvalA) / (double) n) ;
int64_t nvisited = 0 ; // # nodes visited so far
GrB_Index nq = 1 ; // number of nodes in the current level
//--------------------------------------------------------------------------
// BFS traversal and label the nodes
//--------------------------------------------------------------------------
for(int64_t level = 1 ; ; level++) {
//----------------------------------------------------------------------
// set v to the current level, for all nodes in q
//----------------------------------------------------------------------
// v<q> = level: set v(i) = level for all nodes i in q
GrB_assign(v, q, NULL, level, GrB_ALL, n, desc_s) ;
//----------------------------------------------------------------------
// check if done
//----------------------------------------------------------------------
nvisited += nq ;
if(nq == 0 || nvisited == n || level >= max_level) break ;
//----------------------------------------------------------------------
// check if v should be converted to dense
//----------------------------------------------------------------------
if(vsparse && nvisited > vlimit) {
// Convert v from sparse to dense to speed up the rest of the work.
// If this case is triggered, it would have been faster to pass in
// vsparse = false on input.
// v <!v> = 0
GrB_assign(v, v, NULL, 0, GrB_ALL, n, desc_sc) ;
GrB_Vector_nvals(&ignore, v) ;
if(compute_tree) {
// Convert pi from sparse to dense, to speed up the work.
// pi<!pi> = 0
GrB_assign(pi, pi, NULL, 0, GrB_ALL, n, desc_sc) ;
GrB_Vector_nvals(&ignore, pi) ;
}
vsparse = false ;
}
//----------------------------------------------------------------------
// select push vs pull
//----------------------------------------------------------------------
if(push_pull) {
double pushwork = d * nq ;
double expected = (double) n / (double)(nvisited + 1) ;
double per_dot = LAGRAPH_MIN(d, expected) ;
double binarysearch = (3 * (1 + log2((double) nq))) ;
double pullwork = (n - nvisited) * per_dot * binarysearch ;
use_vxm_with_A = (pushwork < pullwork) ;
}
//----------------------------------------------------------------------
// q = next level of the BFS
//----------------------------------------------------------------------
if(use_vxm_with_A) {
// q'<!v> = q'*A
// this is a push step if A is in CSR format; pull if CSC
GrB_vxm(q, v, NULL, first_semiring, q, A, desc_rc) ;
} else {
// q<!v> = AT*q
// this is a pull step if AT is in CSR format; push if CSC
GrB_mxv(q, v, NULL, second_semiring, AT, q, desc_rc) ;
}
//----------------------------------------------------------------------
// move to next level
//----------------------------------------------------------------------
if(compute_tree) {
//------------------------------------------------------------------
// assign parents
//------------------------------------------------------------------
// q(i) currently contains the parent of node i in tree (off by one
// so it won't have any zero values, for valued mask).
// pi<q> = q
GrB_assign(pi, q, NULL, q, GrB_ALL, n, desc_s) ;
//------------------------------------------------------------------
// replace q with current node numbers
//------------------------------------------------------------------
// TODO this could be a unaryop
// q(i) = i+1 for all entries in q.
GrB_Index *qi ;
if(n > INT32_MAX) {
int64_t *qx ;
GxB_Vector_export(&q, &int_type, &n, &nq, &qi,
(void **)(&qx), NULL) ;
int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ;
nth = LAGRAPH_MAX(nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for(int64_t k = 0 ; k < nq ; k++) {
qx [k] = qi [k] + 1 ;
}
GxB_Vector_import(&q, int_type, n, nq, &qi,
(void **)(&qx), NULL) ;
} else {
int32_t *qx ;
GxB_Vector_export(&q, &int_type, &n, &nq, &qi,
(void **)(&qx), NULL) ;
int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ;
nth = LAGRAPH_MAX(nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for(int32_t k = 0 ; k < nq ; k++) {
qx [k] = qi [k] + 1 ;
}
GxB_Vector_import(&q, int_type, n, nq, &qi,
(void **)(&qx), NULL) ;
}
} else {
//------------------------------------------------------------------
// count the nodes in the current level
//------------------------------------------------------------------
GrB_Vector_nvals(&nq, q) ;
}
}
//--------------------------------------------------------------------------
// return the parent vector, if computed
//--------------------------------------------------------------------------
if(compute_tree) {
(*pi_output) = pi ;
pi = NULL ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
(*v_output) = v ; // return result
v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it
LAGRAPH_FREE_ALL ; // free all workspace (except for result v)
return (GrB_SUCCESS) ;
}
|
omp_zsyr2k_batch.c | /**
* @file omp_zsyr2k_batch.c
*
* @brief BBLAS omp_zsyr2k_batch double _Complex routine.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @precisions normal z -> c d s
**/
#endif
#include<cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define COMPLEX
/**
Purpose
-------
<b>zsyr2k_batch</b> is a batch version of zsyr2k.
It performs one of the matrix-matrix operations
arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T + alpha[i]*arrayB[i]*arrayA[i]**T +
beta[i]*arrayC[i], or
arrayC[i] = alpha[i]*arrayA[i]**T *arrayB[i] + alpha[i]*arrayB[i]**T *arrayA[i] +
beta[i]*arrayC[i],
where alpha[i] and beta[i] are scalars, arrayC[i] is an N[i] by N[i] sym-
metric matrix and arrayA[i] and arrayB[i] are N[i] by K[i] matrices in the
first case and K[i] by N[i] matrices in the second case.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayB, arrayC, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayB, arrayC, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of uplo[0], trans[0], N[0], K[0],
alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations.
Parameters
----------
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the upper or
lower triangular part of the matrix arrayC[i] is to
be referenced as follows:
- = 'BblasUpper' Only the upper triangular part of
the matrix is to be referenced.
- = 'BblasLower' Only the lower triangular part of
the matrix is to be referenced.
@param[in]
trans Array of <tt>enum BBLAS_TRANS</tt>.
On entry, trans[i] specifies the operation to be
performed as follows:
- = 'BblasNoTrans' arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T +
alpha[i]*arrayB[i]*arrayA[i]**T + beta[i]*arrayC[i]
- = 'BblasTrans' arrayC[i] = alpha[i]*arrayA[i]**T *arrayB[i] +
alpha[i]*arrayB[i]**T *arrayA[i] + beta[i]*arrayC[i].
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of rows and columns of the matrix
arrayC[i]. N[i] must be greater than zero.
@param[in]
K Array of <tt>int</tt>.
On entry with trans[i] = 'BblasNoTrans', K[i] specifies the
number of columns of the matrices arrayA[i] and arrayB[i],
and upon entry with trans[i] = 'BblasTrans',
K[i] specifies the number of rows of the matrices arrayA[i] and arrayB[i].
K[i] must be greater than zero.
@param[in]
alpha Array of <tt>complex_16</tt>.
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a COMPLEX_16 matrix of
dimension lda[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayA[i] must contain the elements of arrayA[i], otherwise
the leading K[i] by N[i] part of the arrayA[i] must contain the
elements of arrayA[i].
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
lda[i] must be at least max( 1, N[i] ), otherwise lda[i] must be at
least max( 1, K[i] ).
@param[in]
arrayB Array of pointers.
Each element arrayB[i] is a pointer to a COMPLEX_16 matrix of
dimension lda[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayB[i] must contain the elements of arrayB[i], otherwise
the leading K[i] by N[i] part of the arrayB[i] must contain the
elements of arrayB[i].
@param[in]
ldb Array of <tt>int</tt>.
On entry, ldb[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
ldb[i] must be at least max( 1, N[i] ), otherwise ldb[i] must be at
least max( 1, K[i] ).
@param[in]
beta Array of <tt>complex_16</tt>.
When beta[i] is set to zero arrayC[i] need not be set on input.
@param[in,out]
arrayC Array of pointers.
Each elements arrayC[i] is a pointer to a COMPLEX_16 matrix of
dimension ldc[i] by N[i].
Before entry with uplo[i] = 'BblasUpper', the leading
N[i] by N[i] upper triangular part of the arrayC[i] must con-
tain the upper triangular part of the symmetric
matrix and the strictly lower triangular part of arrayC[i]
is not referenced. On exit, the upper triangular
part of the arrayC[i] is overwritten by the upper triangular part
of the updated matrix.
Before entry with uplo[i] = 'BblasLower', the leading N[i] by N[i] lower
triangular part of the arrayC[i] must contain the lower
triangular part of the symmetric matrix and the
strictly upper triangular part of arrayC[i] is not referenced.
On exit, the lower triangular part of the
arrayC[i] is overwritten by the lower triangular part
of the updated matrix.
@param[in]
ldc Array of <tt>int</tt>.
On entry, ldc[i] specifies the first dimension of arrayC[i] as declared
in the calling (sub) program. Each element ldc must be at least max( 1, N[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith zsyr2k in the batch,
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
/**
 * omp_zsyr2k_batch: OpenMP-parallel batched ZSYR2K.
 *
 * For each problem i the routine computes
 *   arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T
 *             + alpha[i]*arrayB[i]*arrayA[i]**T + beta[i]*arrayC[i]
 * (or the transposed variant selected by trans[i]) by delegating to
 * cblas_zsyr2k.  Full parameter documentation is in the Doxygen block above.
 * Per-problem status codes are written to info[i].
 *
 * BBLAS_FIXED:    the parameters of problem 0 are validated once and reused
 *                 for every problem in the batch.
 * BBLAS_VARIABLE: each problem is validated and solved independently, so a
 *                 bad problem only flags its own info[i] entry.
 */
void omp_zsyr2k_batch(
    const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *trans,
    const int *N, const int *K, const BBLAS_Complex64_t *alpha,
    const BBLAS_Complex64_t **arrayA, const int *lda,
    const BBLAS_Complex64_t **arrayB, const int *ldb,
    const BBLAS_Complex64_t *beta, BBLAS_Complex64_t **arrayC,
    const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
    /* Local variables */
    int first_index = 0;
    int batch_iter;
    int LDA, LDB;   /* minimum leading dimensions implied by trans/N/K */
    char func_name[15] = "zsyr2k_batch";

    /* Check input arguments */
    if (batch_count < 0)
    {
        /* Report and fall through: the loops below run zero times anyway. */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* All problems share the parameters of index 0: validate them once.
         * On any error, flag every info[] entry and return. */
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower))
        {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if ((trans[first_index] != BblasNoTrans) &&
            (trans[first_index] != BblasTrans) &&
            (trans[first_index] != BblasConjTrans))
        {
            xerbla_batch(func_name, BBLAS_ERR_TRANS, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_TRANS;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        if (K[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_K, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_K;
            }
            return;
        }
        /* A and B are N-by-K when NoTrans, K-by-N otherwise */
        if (trans[first_index] == BblasNoTrans)
        {
            LDA = N[first_index];
            LDB = N[first_index];
        } else
        {
            LDA = K[first_index];
            LDB = K[first_index];
        }
        if (lda[first_index] < max(1, LDA))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldb[first_index] < max(1, LDB))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDB;
            }
            return;
        }
        if (ldc[first_index] < max(1, N[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDC;
            }
            return;
        }
        /* Quick return: nothing to do for an empty problem, or when
         * alpha == 0 together with beta == 1 leaves C unchanged.
         * BUG FIX: this used || between the alpha and beta tests, which
         * wrongly skipped all work whenever beta == 1 (even with nonzero
         * alpha) or whenever alpha == 0 with beta != 1; the && below
         * matches the BBLAS_VARIABLE branch.
         * NOTE(review): returning on K == 0 alone skips the beta scaling
         * of C, mirroring the variable branch -- confirm against the
         * reference ZSYR2K quick-return condition. */
        if (N[first_index] == 0 || K[first_index] == 0 ||
            ((alpha[first_index] == (BBLAS_Complex64_t)0.0) &&
             beta[first_index] == (BBLAS_Complex64_t)1.0))
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
        /* Solve all problems in parallel with the shared parameters. */
        #pragma omp parallel for private(batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Call to cblas_zsyr2k */
            cblas_zsyr2k(
                BblasColMajor,
                uplo[first_index],
                trans[first_index],
                N[first_index],
                K[first_index],
                CBLAS_SADDR(alpha[first_index]),
                arrayA[batch_iter],
                lda[first_index],
                arrayB[batch_iter],
                ldb[first_index],
                CBLAS_SADDR(beta[first_index]),
                arrayC[batch_iter],
                ldc[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /* END FIXED SIZE FOR LOOP */
    }else if (batch_opts == BBLAS_VARIABLE)
    {
        /* Each problem carries its own parameters: validate and solve it
         * independently; a bad problem flags info[batch_iter] and is
         * skipped without affecting its siblings. */
        #pragma omp parallel for private(batch_iter, LDA, LDB)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Check input arguments */
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower))
            {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if ((trans[batch_iter] != BblasNoTrans) &&
                (trans[batch_iter] != BblasTrans) &&
                (trans[batch_iter] != BblasConjTrans))
            {
                xerbla_batch(func_name, BBLAS_ERR_TRANS, batch_iter);
                info[batch_iter] = BBLAS_ERR_TRANS;
                continue;
            }
            if (N[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            if (K[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_K, batch_iter);
                info[batch_iter] = BBLAS_ERR_K;
                continue;
            }
            /* A and B are N-by-K when NoTrans, K-by-N otherwise */
            if (trans[batch_iter] == BblasNoTrans)
            {
                LDA = N[batch_iter];
                LDB = N[batch_iter];
            } else
            {
                LDA = K[batch_iter];
                LDB = K[batch_iter];
            }
            if (lda[batch_iter] < max(1, LDA))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            if (ldb[batch_iter] < max(1, LDB))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDB;
                continue;
            }
            if (ldc[batch_iter] < max(1, N[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDC;
                continue;
            }
            /* Quick return for this problem: empty, or C unchanged. */
            if (N[batch_iter] == 0 || K[batch_iter] == 0 ||
                ((alpha[batch_iter] == (BBLAS_Complex64_t)0.0) &&
                 beta[batch_iter] == (BBLAS_Complex64_t)1.0))
            {
                info[batch_iter] = BBLAS_SUCCESS;
                continue;
            }
            cblas_zsyr2k(
                BblasColMajor,
                uplo[batch_iter],
                trans[batch_iter],
                N[batch_iter],
                K[batch_iter],
                CBLAS_SADDR(alpha[batch_iter]),
                arrayA[batch_iter],
                lda[batch_iter],
                arrayB[batch_iter],
                ldb[batch_iter],
                CBLAS_SADDR(beta[batch_iter]),
                arrayC[batch_iter],
                ldc[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    }else
    {
        /* Unknown batch_opts value */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef COMPLEX
|
GB_binop__lt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lt_uint8
// A.*B function (eWiseMult): GB_AemultB__lt_uint8
// A*D function (colscale): GB_AxD__lt_uint8
// D*A function (rowscale): GB_DxB__lt_uint8
// C+=B function (dense accum): GB_Cdense_accumB__lt_uint8
// C+=b function (dense accum): GB_Cdense_accumb__lt_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_uint8
// C=scalar+B GB_bind1st__lt_uint8
// C=scalar+B' GB_bind1st_tran__lt_uint8
// C=A+scalar GB_bind2nd__lt_uint8
// C=A'+scalar GB_bind2nd_tran__lt_uint8
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_UINT8 || GxB_NO_LT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The loop body comes from
// GB_dense_ewise3_noaccum_template.c, specialized by the GB_* macros above
// to z = (x < y) with uint8_t inputs and bool output.
GrB_Info GB_Cdense_ewise3_noaccum__lt_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads  // # of OpenMP threads the template may use
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  For this
// operator the worker template is compiled out (#if 0 below), so the
// function is a stub that reports success without modifying C.
GrB_Info GB_Cdense_accumB__lt_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,  // task slicing of B (unused here)
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  For this operator the
// worker template is compiled out (#if 0 below), so the function is a stub
// that reports success without modifying C.
GrB_Info GB_Cdense_accumb__lt_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,  // pointer to the scalar b (type uint8_t)
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying
// z = (x < y) entrywise.  The loop body comes from GB_AxB_colscale_meta.c.
GrB_Info GB_AxD__lt_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,  // if true, A values are not read
const GrB_Matrix D, bool D_is_pattern,  // if true, D values are not read
const int64_t *GB_RESTRICT kfirst_slice,  // task slicing of A
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// result values: C has the same pattern as A, with bool values
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying
// z = (x < y) entrywise.  The loop body comes from GB_AxB_rowscale_meta.c.
GrB_Info GB_DxB__lt_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,  // if true, D values are not read
const GrB_Matrix B, bool B_is_pattern,  // if true, B values are not read
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// result values: C has the same pattern as B, with bool values
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and
// B, applying z = (x < y) where both are present.  The loop body comes
// from GB_add_template.c.
GrB_Info GB_AaddB__lt_uint8
(
GrB_Matrix C,
const GrB_Matrix M,          // optional mask (may be NULL)
const bool Mask_struct,      // if true, use only the structure of M
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,         // if true, C->h is a copy of M->h
const int64_t *GB_RESTRICT C_to_M,   // vector mappings from C to M, A, B
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,  // parallel task schedule
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns
// of A and B, applying z = (x < y).  The loop body comes from
// GB_emult_template.c.
GrB_Info GB_AemultB__lt_uint8
(
GrB_Matrix C,
const GrB_Matrix M,          // optional mask (may be NULL)
const bool Mask_struct,      // if true, use only the structure of M
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,   // vector mappings from C to M, A, B
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,  // parallel task schedule
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = (x < y) with the scalar bound as the first
// argument, over all anz entries of Bx.
GrB_Info GB_bind1st__lt_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
bool *Cz = (bool *) Cx_output ;
const uint8_t xval = (*((const uint8_t *) x_input)) ;
const uint8_t *Bz = (const uint8_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// Cz [k] = (xval < Bz [k])
Cz [k] = (xval < Bz [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (x < y) with the scalar bound as the second
// argument, over all anz entries of Ax.
GrB_Info GB_bind2nd__lt_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
const uint8_t *Az = (const uint8_t *) Ax_input ;
const uint8_t yval = (*((const uint8_t *) y_input)) ;
bool *Cz = (bool *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// Cz [k] = (Az [k] < yval)
Cz [k] = (Az [k] < yval) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x < aij) to each entry, with
// the scalar bound as the first argument.  The transpose worker comes from
// GB_unop_transpose.c, driven by the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__lt_uint8
(
GrB_Matrix C,
const GB_void *x_input,      // pointer to the bound scalar x (uint8_t)
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,     // workspace for the transpose
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code compiled after this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply z = (aij < y) to each entry, with
// the scalar bound as the second argument.  The transpose worker comes from
// GB_unop_transpose.c, driven by the GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__lt_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,      // pointer to the bound scalar y (uint8_t)
int64_t *GB_RESTRICT *Rowcounts,     // workspace for the transpose
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__bshift_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bshift_uint16
// A.*B function (eWiseMult): GB_AemultB__bshift_uint16
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bshift_uint16
// C+=b function (dense accum): GB_Cdense_accumb__bshift_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_uint16
// C=scalar+B GB_bind1st__bshift_uint16
// C=scalar+B' GB_bind1st_tran__bshift_uint16
// C=A+scalar GB_bind2nd__bshift_uint16
// C=A'+scalar GB_bind2nd_tran__bshift_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint16 (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_bitshift_uint16 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The loop body comes from
// GB_dense_ewise3_noaccum_template.c, specialized by the GB_* macros above
// to z = GB_bitshift_uint16 (x, y) with uint16_t A, int8_t B, uint16_t C.
GrB_Info GB_Cdense_ewise3_noaccum__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads  // # of OpenMP threads the template may use
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, applying
// C(i,j) = GB_bitshift_uint16 (C(i,j), B(i,j)) for each entry of B.
// The loop body comes from GB_dense_subassign_23_template.c.
GrB_Info GB_Cdense_accumB__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,  // task slicing of B
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix, applying
// C(i,j) = GB_bitshift_uint16 (C(i,j), b) to every entry of C.
// The loop body comes from GB_dense_subassign_22_template.c.
GrB_Info GB_Cdense_accumb__bshift_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,  // pointer to the scalar b (type int8_t)
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable; kept for symmetry with the generated-code pattern
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and
// B, applying z = GB_bitshift_uint16 (x, y) where both are present.  The
// loop body comes from GB_add_template.c; the slicing workspace it
// allocates is released by GB_FREE_ALL (defined just above).
GrB_Info GB_AaddB__bshift_uint16
(
GrB_Matrix C,
const int C_sparsity,        // sparsity structure to use for C
const GrB_Matrix M,          // optional mask (may be NULL)
const bool Mask_struct,      // if true, use only the structure of M
const bool Mask_comp,        // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,         // if true, C->h is a copy of M->h
const int64_t *GB_RESTRICT C_to_M,   // vector mappings from C to M, A, B
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,  // parallel task schedule
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspace, allocated inside the template and freed below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns
// of A and B, applying z = GB_bitshift_uint16 (x, y).  The loop body comes
// from GB_emult_template.c; the slicing workspace it allocates is released
// by GB_FREE_ALL (defined above, before GB_AaddB).
GrB_Info GB_AemultB__bshift_uint16
(
GrB_Matrix C,
const int C_sparsity,        // sparsity structure to use for C
const GrB_Matrix M,          // optional mask (may be NULL)
const bool Mask_struct,      // if true, use only the structure of M
const bool Mask_comp,        // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,   // vector mappings from C to M, A, B
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,  // parallel task schedule
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspace, allocated inside the template and freed below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitshift (x, Bx [p]) for every entry present in B.
// The scalar x (uint16) is bound as the first operand; Bx holds the int8
// shift counts.  Cx and Bx may alias, so no restrict qualifiers are used.
GrB_Info GB_bind1st__bshift_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
uint16_t *zarray = (uint16_t *) Cx_output ;
const uint16_t xscalar = (*((uint16_t *) x_input)) ;
const int8_t *shifts = (const int8_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present in the bitmap, if any
if (!GBB (Bb, k)) continue ;
zarray [k] = GB_bitshift_uint16 (xscalar, shifts [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitshift (Ax [p], y) for every entry present in A.
// The scalar y (int8 shift count) is bound as the second operand.
// Cx and Ax may alias, so no restrict qualifiers are used.
GrB_Info GB_bind2nd__bshift_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
uint16_t *zarray = (uint16_t *) Cx_output ;
const uint16_t *avalues = (const uint16_t *) Ax_input ;
const int8_t yscalar = (*((int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present in the bitmap, if any
if (!GBB (Ab, k)) continue ;
zarray [k] = GB_bitshift_uint16 (avalues [k], yscalar) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP: cij = bitshift (x, aij); the shift count aij is int8_t
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint16 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound as the
// first operand.  The work is done by the included GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__bshift_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (redefined above) for any code that follows this kernel
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP: cij = bitshift (aij, y); aij is the uint16 value, y the shift
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint16 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound as the
// second operand.  The work is done by the included GB_unop_transpose.c;
// here GB_ATYPE already matches A's uint16 type, so no redefinition is needed.
GrB_Info GB_bind2nd_tran__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mlp_example_bf16_amx_numa.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas, Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#include <numa.h>
/* include c-based dnn library */
#include "../common/dnn_common.h"
#define CHECK_L1
#define OVERWRITE_DOUTPUT_BWDUPD
#define _mm512_load_fil(A) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepi16_epi32(_mm256_loadu_si256((__m256i*)(A))),16))
#define _mm512_store_fil(A,B) _mm256_storeu_si256((__m256i*)(A), _mm512_cvtneps_pbh((B)))
/* Fill an fp32 buffer: all ones if initOne, uniform positive randoms if
 * initPos, otherwise small mixed-sign randoms around zero.  The buffer is
 * zeroed first. */
LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne)
{
int idx;
zero_buf(buf, size);
for (idx = 0; idx < (int)size; ++idx) {
double val;
if (initOne != 0) {
val = 1.0;
} else if (initPos != 0) {
val = libxsmm_rng_f64();
} else {
val = 0.05 - libxsmm_rng_f64()/10.0;
}
buf[idx] = (float)val;
}
}
/* Fill a bf16 buffer with the same value scheme as my_init_buf: ones,
 * positive randoms, or small mixed-sign randoms.  Values are generated in
 * fp32 and truncated to bf16 by keeping the high 16 bits of the float
 * (union word 1 — assumes little-endian layout, as the original did). */
LIBXSMM_INLINE void my_init_buf_bf16(libxsmm_bfloat16* buf, size_t size, int initPos, int initOne)
{
int idx;
zero_buf_bf16(buf, size);
for (idx = 0; idx < (int)size; ++idx) {
libxsmm_bfloat16_hp cvt;
double val;
if (initOne != 0) {
val = 1.0;
} else if (initPos != 0) {
val = libxsmm_rng_f64();
} else {
val = 0.05 - libxsmm_rng_f64()/10.0;
}
cvt.f = (float)val;
buf[idx] = cvt.i[1];
}
}
#if 0
/* DEAD CODE: disabled by the surrounding #if 0 / #endif.
 * Repacks a KCCK-blocked fp32 weight tensor into VNNI layout: pairs of
 * consecutive c2 rows are interleaved into the innermost dimension of 2. */
LIBXSMM_INLINE void my_matrix_copy_KCCK_to_KCCK_vnni(float *src, float *dst, int C, int K, int bc, int bk)
{
int k1, k2, c1, c2;
int kBlocks = K/bk;
int cBlocks = C/bc;
LIBXSMM_VLA_DECL(4, float, real_src, src, cBlocks, bc, bk);
LIBXSMM_VLA_DECL(5, float, real_dst, dst, cBlocks, bc/2, bk, 2);
for (k1 = 0; k1 < kBlocks; k1++) {
for (c1 = 0; c1 < cBlocks; c1++) {
for (c2 = 0; c2 < bc; c2++) {
for (k2 = 0; k2 < bk; k2++) {
/* dst[k1][c1][c2/2][k2][c2%2] = src[k1][c1][c2][k2] */
LIBXSMM_VLA_ACCESS(5, real_dst, k1, c1, c2/2, k2, c2%2, cBlocks, bc/2, bk, 2) = LIBXSMM_VLA_ACCESS(4, real_src, k1, c1, c2, k2, cBlocks, bc, bk);
}
}
}
}
}
#endif
/* Bit flags selecting which element-wise operations are fused into the
 * FC-layer GEMMs (values can be OR-combined and tested with &). */
typedef enum my_eltwise_fuse {
MY_ELTWISE_FUSE_NONE = 0,
MY_ELTWISE_FUSE_BIAS = 1,
MY_ELTWISE_FUSE_RELU = 2,
MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU
} my_eltwise_fuse;
/* Bit flags identifying the training pass. */
typedef enum my_pass {
MY_PASS_FWD = 1,
MY_PASS_BWD_D = 2,
MY_PASS_BWD_W = 4,
MY_PASS_BWD = 6 /* = MY_PASS_BWD_D | MY_PASS_BWD_W */
} my_pass;
/* Handle for the weight-update (optimizer) step of one FC layer:
 * weight geometry (C x K, blocked bc x bk), thread count, learning rate,
 * scratch requirement and the barrier used to synchronize threads. */
typedef struct my_opt_config {
libxsmm_blasint C;
libxsmm_blasint K;
libxsmm_blasint bc;
libxsmm_blasint bk;
libxsmm_blasint threads;
float lr;
size_t scratch_size;
libxsmm_barrier* barrier;
} my_opt_config;
/* Handle for the softmax forward pass: problem size (N x C, blocked
 * bn x bc), thread count, scratch requirement and thread barrier. */
typedef struct my_smax_fwd_config {
libxsmm_blasint N;
libxsmm_blasint C;
libxsmm_blasint bn;
libxsmm_blasint bc;
libxsmm_blasint threads;
size_t scratch_size;
libxsmm_barrier* barrier;
} my_smax_fwd_config;
/* Handle for the softmax backward pass; like the forward handle plus the
 * loss weight applied to the gradient. */
typedef struct my_smax_bwd_config {
libxsmm_blasint N;
libxsmm_blasint C;
libxsmm_blasint bn;
libxsmm_blasint bc;
libxsmm_blasint threads;
size_t scratch_size;
float loss_weight;
libxsmm_barrier* barrier;
} my_smax_bwd_config;
/* Handle for the forward pass of one fully-connected layer.
 * Holds the problem geometry, parallelization strategy, scratch size,
 * thread barrier, and the JITed BRGEMM / element-wise TPP kernels
 * (created in setup_my_fc_fwd, used in my_fc_fwd_exec). */
typedef struct my_fc_fwd_config {
libxsmm_blasint N;
libxsmm_blasint C;
libxsmm_blasint K;
libxsmm_blasint bn;
libxsmm_blasint bc;
libxsmm_blasint bk;
libxsmm_blasint threads;
my_eltwise_fuse fuse_type;
/* blocking factor over input-feature blocks and 2D thread-team layout */
libxsmm_blasint fwd_bf;
libxsmm_blasint fwd_2d_blocking;
libxsmm_blasint fwd_row_teams;
libxsmm_blasint fwd_column_teams;
size_t scratch_size;
libxsmm_barrier* barrier;
/* tile configure / release kernels bracketing the BRGEMM calls */
libxsmm_bsmmfunction fwd_config_kernel;
libxsmm_bsmmfunction tilerelease_kernel;
/* BRGEMM variants: accumulating, overwriting, bf16-out, and fused
 * bias/ReLU/sigmoid combinations (selected by fuse_type at exec time) */
libxsmm_bsmmfunction_reducebatch_strd gemm_fwd;
libxsmm_bsmmfunction_reducebatch_strd gemm_fwd2;
libxsmm_bmmfunction_reducebatch_strd gemm_fwd3;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd4;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd5;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd6;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd7;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd8;
/* element-wise kernels: fp32->bf16 converts (plain / +ReLU / sigmoid+),
 * zeroing and bf16->fp32 copy / column-broadcast helpers */
libxsmm_meltwfunction_cvtfp32bf16 fwd_cvtfp32bf16_kernel;
libxsmm_meltwfunction_cvtfp32bf16_act fwd_cvtfp32bf16_relu_kernel;
libxsmm_meltwfunction_act_cvtfp32bf16 fwd_sigmoid_cvtfp32bf16_kernel;
libxsmm_meltwfunction_copy fwd_zero_kernel;
libxsmm_meltwfunction_copy fwd_copy_bf16fp32_kernel;
libxsmm_meltwfunction_copy fwd_colbcast_bf16fp32_copy_kernel;
} my_fc_fwd_config;
/* Handle for the backward pass of one fully-connected layer, covering both
 * the data-gradient (bwd) and weight-gradient (upd) computations.
 * Holds geometry, per-pass parallelization strategies, scratch bookkeeping,
 * thread barrier, and the JITed BRGEMM / element-wise / transform kernels
 * (created in setup_my_fc_bwd). */
typedef struct my_fc_bwd_config {
libxsmm_blasint N;
libxsmm_blasint C;
libxsmm_blasint K;
libxsmm_blasint bn;
libxsmm_blasint bc;
libxsmm_blasint bk;
libxsmm_blasint threads;
my_eltwise_fuse fuse_type;
/* data-gradient pass: blocking factor and 2D team layout */
libxsmm_blasint bwd_bf;
libxsmm_blasint bwd_2d_blocking;
libxsmm_blasint bwd_row_teams;
libxsmm_blasint bwd_column_teams;
/* weight-gradient pass: blocking factor, team layout and subtasking */
libxsmm_blasint upd_bf;
libxsmm_blasint upd_2d_blocking;
libxsmm_blasint upd_row_teams;
libxsmm_blasint upd_column_teams;
libxsmm_blasint ifm_subtasks;
libxsmm_blasint ofm_subtasks;
size_t scratch_size;
/* byte offset into scratch where the doutput region begins */
size_t doutput_scratch_mark;
libxsmm_barrier* barrier;
/* tile configure / release kernels for the two passes */
libxsmm_bsmmfunction bwd_config_kernel;
libxsmm_bsmmfunction upd_config_kernel;
libxsmm_bsmmfunction tilerelease_kernel;
/* BRGEMM variants: accumulating / overwriting / bf16-out for each pass */
libxsmm_bsmmfunction_reducebatch_strd gemm_bwd;
libxsmm_bsmmfunction_reducebatch_strd gemm_bwd2;
libxsmm_bmmfunction_reducebatch_strd gemm_bwd3;
libxsmm_bsmmfunction_reducebatch_strd gemm_upd;
libxsmm_bsmmfunction_reducebatch_strd gemm_upd2;
libxsmm_bmmfunction_reducebatch_strd gemm_upd3;
/* element-wise kernels: converts, ReLU-backward, zeroing, bias reduce */
libxsmm_meltwfunction_cvtfp32bf16 bwd_cvtfp32bf16_kernel;
libxsmm_meltwfunction_cvtfp32bf16 upd_cvtfp32bf16_kernel;
libxsmm_meltwfunction_relu bwd_relu_kernel;
libxsmm_meltwfunction_copy bwd_zero_kernel;
libxsmm_meltwfunction_copy upd_zero_kernel;
libxsmm_meltwfunction_reduce delbias_reduce_kernel;
/* layout transforms used to prepare operands for the BRGEMMs */
libxsmm_meltwfunction_transform vnni_to_vnniT_kernel;
libxsmm_meltwfunction_transform norm_to_normT_kernel;
libxsmm_meltwfunction_transform norm_to_vnni_kernel;
} my_fc_bwd_config;
/* Per-NUMA-domain thread configuration (presumably one instance per NUMA
 * node — confirm against the allocation site, which is outside this view):
 * the [thr_s, thr_e] thread-id range owned by the domain, per-layer block
 * ranges, and per-layer locally-allocated scratch buffers with their sizes. */
typedef struct my_numa_thr_cfg {
int thr_s;
int thr_e;
/* per-layer output/input feature-block start/end indices for this domain */
int *blocksOFm_s;
int *blocksOFm_e;
int *blocksIFm_s;
int *blocksIFm_e;
/* per-layer scratch buffers (fwd, bwd-data, bwd-weights) and their sizes */
libxsmm_bfloat16 **scratch;
size_t *layer_size;
libxsmm_bfloat16 **bwd_d_scratch;
size_t *bwd_d_layer_size;
libxsmm_bfloat16 **bwd_w_scratch;
size_t *bwd_w_layer_size;
} my_numa_thr_cfg;
/* Build the forward-pass handle for one FC layer: record the problem
 * geometry, choose a parallelization strategy, create the thread barrier,
 * and JIT all BRGEMM and element-wise TPP kernels used by my_fc_fwd_exec.
 * Exits the process on any JIT failure. */
my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn,
libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
my_fc_fwd_config res;
/* leading dimensions: weights (bk), input (bc), output (bk) */
libxsmm_blasint lda = bk;
libxsmm_blasint ldb = bc;
libxsmm_blasint ldc = bk;
libxsmm_blasint ld_zero = bk*bn;
libxsmm_blasint ld_upconvert = K;
float alpha = 1.0f;
float beta = 1.0f;
float zerobeta = 0.0f;
libxsmm_meltw_flags fusion_flags;
int l_flags, l_tc_flags;
int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
libxsmm_blasint unroll_hint;
/* setting up some handle values */
res.N = N;
res.C = C;
res.K = K;
res.bn = bn;
res.bc = bc;
res.bk = bk;
res.threads = threads;
res.fuse_type = fuse_type;
/* setup parallelization strategy: 2x8 2D team grid for 16 threads,
 * flat 1D partitioning otherwise */
if (threads == 16) {
res.fwd_bf = 1;
res.fwd_2d_blocking = 1;
res.fwd_row_teams = 2;
res.fwd_column_teams = 8;
} else {
res.fwd_bf = 1;
res.fwd_2d_blocking = 0;
res.fwd_row_teams = 1;
res.fwd_column_teams = 1;
}
#if 0
res.fwd_bf = atoi(getenv("FWD_BF"));
res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING"));
res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS"));
res.fwd_column_teams = atoi(getenv("FWD_COLUMN_TEAMS"));
#endif
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* TPP creation: BRGEMMs skip tile setup/reset per call; the dedicated
 * config/release kernels below bracket them instead */
l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
unroll_hint = (res.C/res.bc)/res.fwd_bf;
res.fwd_config_kernel = libxsmm_bsmmdispatch(res.bk, res.bn, res.bc, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL);
if ( res.fwd_config_kernel == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP fwd_config_kernel failed. Bailing...!\n");
exit(-1);
}
/* gemm_fwd: accumulating (beta=1); gemm_fwd2: overwriting fp32 out;
 * gemm_fwd3: overwriting bf16 out */
res.gemm_fwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
if ( res.gemm_fwd == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n");
exit(-1);
}
res.gemm_fwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_fwd2 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n");
exit(-1);
}
res.gemm_fwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_fwd3 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd3 failed. Bailing...!\n");
exit(-1);
}
/* fused BRGEMM variants: gemm_fwd4 = +bias, gemm_fwd5 = +ReLU,
 * gemm_fwd6 = +sigmoid, gemm_fwd7 = +bias+ReLU, gemm_fwd8 = +bias+sigmoid */
fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_OVERWRITE_C;
res.gemm_fwd4 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd4 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd4 failed. Bailing...!\n");
exit(-1);
}
fusion_flags = LIBXSMM_MELTW_FLAG_ACT_RELU_OVERWRITE_C;
res.gemm_fwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd5 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd5 failed. Bailing...!\n");
exit(-1);
}
fusion_flags = LIBXSMM_MELTW_FLAG_ACT_SIGM_OVERWRITE_C;
res.gemm_fwd6 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd6 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd6 failed. Bailing...!\n");
exit(-1);
}
fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_RELU_OVERWRITE_C;
res.gemm_fwd7 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd7 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd7 failed. Bailing...!\n");
exit(-1);
}
fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_SIGM_OVERWRITE_C;
res.gemm_fwd8 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd8 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd8 failed. Bailing...!\n");
exit(-1);
}
/* Also JIT eltwise TPPs... */
res.fwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_cvtfp32bf16(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_CVT_NONE);
if ( res.fwd_cvtfp32bf16_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_cvtfp32bf16_relu_kernel = libxsmm_dispatch_meltw_cvtfp32bf16_act(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_CVTA_FUSE_RELU, 0);
if ( res.fwd_cvtfp32bf16_relu_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_relu_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_sigmoid_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_act_cvtfp32bf16(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_ACVT_FUSE_SIGM, 0);
if ( res.fwd_sigmoid_cvtfp32bf16_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_sigmoid_cvtfp32bf16_kernel failed. Bailing...!\n");
exit(-1);
}
res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL);
if ( res.tilerelease_kernel == NULL ) {
fprintf( stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_zero_kernel = libxsmm_dispatch_meltw_copy(bn*bk, 1, &ld_zero, &ld_zero, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_COPY_ZERO);
if ( res.fwd_zero_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_zero_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_colbcast_bf16fp32_copy_kernel = libxsmm_dispatch_meltw_copy(bk, bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_COPY_COLBCAST);
if ( res.fwd_colbcast_bf16fp32_copy_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_colbcast_bf16fp32_copy_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_copy_bf16fp32_kernel = libxsmm_dispatch_meltw_copy(K, 1, &ld_upconvert, &ld_upconvert, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_COPY_NONE);
if ( res.fwd_copy_bf16fp32_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_copy_bf16fp32_kernel failed. Bailing...!\n");
exit(-1);
}
/* init scratch: fp32 output staging or per-thread bias/tile staging,
 * whichever is larger */
res.scratch_size = sizeof(float) * LIBXSMM_MAX(res.K * res.N, res.threads * LIBXSMM_MAX(res.bk * res.bn, res.K));
return res;
}
/* Build the backward-pass handle for one FC layer: record the problem
 * geometry, choose parallelization strategies for the data-gradient (bwd)
 * and weight-gradient (upd) passes, create the thread barrier, and JIT all
 * BRGEMM, element-wise and transform TPP kernels used at exec time.
 * Exits the process on any JIT failure.
 * (Unused locals tr_desc, blob and fusion_flags removed.) */
my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn,
libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
my_fc_bwd_config res;
/* leading dimensions for the bwd GEMMs: weights (bk), input-grad (bc) */
libxsmm_blasint lda = bk;
libxsmm_blasint ldb = bc;
libxsmm_blasint ldc = bk;
libxsmm_blasint ld_zero_bwd = bc*bn;
libxsmm_blasint ld_zero_upd = bk;
libxsmm_blasint delbias_K = K;
libxsmm_blasint delbias_N = N;
float alpha = 1.0f;
float beta = 1.0f;
float zerobeta = 0.0f;
libxsmm_blasint updM;
libxsmm_blasint updN;
int l_flags, l_tc_flags;
int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
libxsmm_blasint unroll_hint;
size_t size_bwd_scratch;
size_t size_upd_scratch;
libxsmm_blasint bbk;
libxsmm_blasint bbc;
libxsmm_meltw_transform_flags trans_flags;
libxsmm_blasint ldaT = bc;
libxsmm_blasint ldb_orig= bc;
/* setting up some handle values */
res.N = N;
res.C = C;
res.K = K;
res.bn = bn;
res.bc = bc;
res.bk = bk;
res.threads = threads;
res.fuse_type = fuse_type;
/* setup parallelization strategy: 2x8 2D team grid for the bwd pass at
 * 16 threads, flat 1D partitioning otherwise */
if (threads == 16) {
res.bwd_bf = 1;
res.bwd_2d_blocking = 1;
res.bwd_row_teams = 2;
res.bwd_column_teams = 8;
res.upd_bf = 1;
res.upd_2d_blocking = 0;
res.upd_row_teams = 1;
res.upd_column_teams = 1;
res.ifm_subtasks = 1;
res.ofm_subtasks = 1;
} else {
res.bwd_bf = 1;
res.bwd_2d_blocking = 0;
res.bwd_row_teams = 1;
res.bwd_column_teams = 1;
res.upd_bf = 1;
res.upd_2d_blocking = 0;
res.upd_row_teams = 1;
res.upd_column_teams = 1;
res.ifm_subtasks = 1;
res.ofm_subtasks = 1;
}
/* effective per-subtask block sizes for the weight-gradient pass */
bbk = (res.upd_2d_blocking == 1) ? bk : bk/res.ofm_subtasks;
bbc = (res.upd_2d_blocking == 1) ? bc : bc/res.ifm_subtasks;
#if 0
res.bwd_bf = atoi(getenv("BWD_BF"));
res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING"));
res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS"));
res.bwd_column_teams = atoi(getenv("BWD_COLUMN_TEAMS"));
res.upd_bf = atoi(getenv("UPD_BF"));
res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING"));
res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS"));
res.upd_column_teams = atoi(getenv("UPD_COLUMN_TEAMS"));
res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS"));
res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS"));
#endif
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* TPP creation */
/* BWD GEMM: BRGEMMs skip tile setup/reset; bwd_config_kernel brackets them */
l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
unroll_hint = (res.K/res.bk)/res.bwd_bf;
/* gemm_bwd: accumulating (beta=1); gemm_bwd2: overwriting fp32 out;
 * gemm_bwd3: overwriting bf16 out */
res.gemm_bwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &beta, &l_flags, NULL);
if ( res.gemm_bwd == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n");
exit(-1);
}
res.gemm_bwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_bwd2 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n");
exit(-1);
}
res.gemm_bwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_bwd3 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd3 failed. Bailing...!\n");
exit(-1);
}
res.bwd_config_kernel = libxsmm_bsmmdispatch(res.bc, res.bn, res.bk, &ldb, &lda, &ldb, NULL, &beta, &l_tc_flags, NULL);
if ( res.bwd_config_kernel == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP bwd_config_kernel failed. Bailing...!\n");
exit(-1);
}
/* Also JIT eltwise TPPs... */
res.bwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_cvtfp32bf16(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_CVT_NONE);
if ( res.bwd_cvtfp32bf16_kernel == NULL ) {
fprintf( stderr, "JIT for TPP bwd_cvtfp32bf16_kernel failed. Bailing...!\n");
exit(-1);
}
res.bwd_relu_kernel = libxsmm_dispatch_meltw_relu(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_RELU_BWD, 0);
if ( res.bwd_relu_kernel == NULL ) {
fprintf( stderr, "JIT for TPP bwd_relu_kernel failed. Bailing...!\n");
exit(-1);
}
res.bwd_zero_kernel = libxsmm_dispatch_meltw_copy(bn*bc, 1, &ld_zero_bwd, &ld_zero_bwd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_COPY_ZERO);
if ( res.bwd_zero_kernel == NULL ) {
fprintf( stderr, "JIT for TPP bwd_zero_kernel failed. Bailing...!\n");
exit(-1);
}
/* JITing the tranpose kernel */
trans_flags = LIBXSMM_MELTW_FLAG_TRANSFORM_VNNI_TO_VNNIT;
res.vnni_to_vnniT_kernel = libxsmm_dispatch_meltw_transform(bk, bc, &lda, &ldaT, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, trans_flags);
if ( res.vnni_to_vnniT_kernel == NULL ) {
fprintf( stderr, "JIT for TPP vnni_to_vnniT_kernel failed. Bailing...!\n");
exit(-1);
}
/* UPD GEMM: leading dimensions and tile sizes for the weight gradient */
lda = res.bk;
ldb = res.bn;
ldc = res.bk;
updM = res.bk/res.ofm_subtasks;
updN = res.bc/res.ifm_subtasks;
l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
unroll_hint = (res.N/res.bn)/res.upd_bf;
res.gemm_upd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
if ( res.gemm_upd == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n");
exit(-1);
}
res.gemm_upd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_upd2 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_upd2 failed. Bailing...!\n");
exit(-1);
}
/* gemm_upd3 writes its bf16 result directly in VNNI layout */
l_flags = l_flags | LIBXSMM_GEMM_FLAG_VNNI_C;
res.gemm_upd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_upd3 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_upd3 failed. Bailing...!\n");
exit(-1);
}
res.upd_config_kernel = libxsmm_bsmmdispatch(updM, updN, res.bn, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL);
if ( res.upd_config_kernel == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP upd_config_kernel failed. Bailing...!\n");
exit(-1);
}
res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL);
if ( res.tilerelease_kernel == NULL ) {
fprintf( stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n");
exit(-1);
}
/* Also JIT eltwise TPPs... */
res.upd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_cvtfp32bf16(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_CVT_VNNI_FORMAT);
if ( res.upd_cvtfp32bf16_kernel == NULL ) {
fprintf( stderr, "JIT for TPP upd_cvtfp32bf16_kernel failed. Bailing...!\n");
exit(-1);
}
res.upd_zero_kernel = libxsmm_dispatch_meltw_copy(bbk, bbc, &ld_zero_upd, &ld_zero_upd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_COPY_ZERO);
if ( res.upd_zero_kernel == NULL ) {
fprintf( stderr, "JIT for TPP upd_zero_kernel failed. Bailing...!\n");
exit(-1);
}
res.delbias_reduce_kernel = libxsmm_dispatch_meltw_reduce(bk, bn, &delbias_K, &delbias_N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_COLS | LIBXSMM_MELTW_FLAG_REDUCE_ELTS | LIBXSMM_MELTW_FLAG_REDUCE_NCNC_FORMAT, 0);
if ( res.delbias_reduce_kernel == NULL ) {
fprintf( stderr, "JIT for TPP delbias_reduce_kernel failed. Bailing...!\n");
exit(-1);
}
/* JITing the tranpose kernels */
trans_flags = LIBXSMM_MELTW_FLAG_TRANSFORM_NORM_TO_VNNI;
res.norm_to_vnni_kernel = libxsmm_dispatch_meltw_transform(bk, bn, &lda, &lda, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, trans_flags);
if ( res.norm_to_vnni_kernel == NULL ) {
fprintf( stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n");
exit(-1);
}
trans_flags = LIBXSMM_MELTW_FLAG_TRANSFORM_NORM_TO_NORMT;
res.norm_to_normT_kernel = libxsmm_dispatch_meltw_transform(bc, bn, &ldb, &ldb_orig, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, trans_flags);
if ( res.norm_to_normT_kernel == NULL ) {
fprintf( stderr, "JIT for TPP norm_to_normT_kernel failed. Bailing...!\n");
exit(-1);
}
/* init scratch: larger of the bwd and upd requirements, plus a doutput
 * region (one copy when OVERWRITE_DOUTPUT_BWDUPD, two otherwise) */
size_bwd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.N, res.threads * res.bc * res.bn) + sizeof(libxsmm_bfloat16) * res.C * res.K;
size_upd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.K, res.threads * res.bc * res.bk) + sizeof(libxsmm_bfloat16) * res.threads * res.bk * res.bc + sizeof(libxsmm_bfloat16) * (res.N * (res.C + res.K));
#ifdef OVERWRITE_DOUTPUT_BWDUPD
res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + sizeof(libxsmm_bfloat16) * res.N * res.K;
#else
res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + 2 * sizeof(libxsmm_bfloat16) * res.N * res.K;
#endif
res.doutput_scratch_mark = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) ;
return res;
}
/* Build the optimizer handle: record the weight geometry, thread count and
 * learning rate, and create the barrier used to synchronize threads.
 * No scratch space is required. */
my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk,
libxsmm_blasint threads, float lr) {
my_opt_config res;
/* weight geometry */
res.C = C;
res.K = K;
res.bc = bc;
res.bk = bk;
/* execution parameters */
res.threads = threads;
res.lr = lr;
res.barrier = libxsmm_barrier_create(threads, 1);
res.scratch_size = 0;
return res;
}
/* Build the softmax forward handle: record the problem geometry and thread
 * count, create the thread barrier, and size the scratch area (two fp32
 * copies of the N x C activations).  Fixes a stray double semicolon. */
my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc,
libxsmm_blasint threads) {
my_smax_fwd_config res;
/* setting up some handle values */
res.C = C;
res.N = N;
res.bc = bc;
res.bn = bn;
res.threads = threads;
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* init scratch: two fp32 N x C buffers */
res.scratch_size = (sizeof(float)*res.C*res.N*2);
return res;
}
/* Build the softmax backward handle: like the forward handle, plus the
 * loss weight applied to the gradient.  Fixes a stray double semicolon. */
my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc,
libxsmm_blasint threads, float loss_weight) {
my_smax_bwd_config res;
/* setting up some handle values */
res.C = C;
res.N = N;
res.bc = bc;
res.bn = bn;
res.threads = threads;
res.loss_weight = loss_weight;
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* init scratch: two fp32 N x C buffers */
res.scratch_size = (sizeof(float)*res.C*res.N*2);
return res;
}
void my_fc_fwd_exec( my_fc_fwd_config cfg, const libxsmm_bfloat16* wt_ptr, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr,
const libxsmm_bfloat16* bias_ptr, unsigned char* relu_ptr, int start_tid, int my_tid, void* scratch, my_numa_thr_cfg *numa_thr_cfg, int layer ) {
const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
const libxsmm_blasint bn = cfg.bn;
const libxsmm_blasint bk = cfg.bk;
const libxsmm_blasint lpb = 2;
const libxsmm_blasint bc_lp = cfg.bc/lpb;
/* const libxsmm_blasint bc = cfg.bc;*/
libxsmm_blasint use_2d_blocking = cfg.fwd_2d_blocking;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks that could be run in parallel */
const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* loop variables */
libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0;
libxsmm_blasint im_tasks_per_thread = 0, in_tasks_per_thread = 0, my_in_start = 0, my_in_end = 0, my_im_start = 0, my_im_end = 0, my_row_id = 0, my_col_id = 0, row_teams = 0, column_teams = 0;
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk);
LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc);
LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, wt_ptr, nBlocksIFm, bc_lp, cfg.bk, lpb);
LIBXSMM_VLA_DECL(4, float, output_f32, (float*)scratch, nBlocksOFm, bn, bk);
libxsmm_meltw_gemm_param gemm_eltwise_params;
libxsmm_blasint mb2 = 0;
float* fp32_bias_scratch = ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (float*)scratch + ltid * cfg.K : NULL;
LIBXSMM_VLA_DECL(2, const libxsmm_bfloat16, bias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) bias_ptr : NULL, cfg.bk);
LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32);
libxsmm_meltwfunction_cvtfp32bf16_act eltwise_kernel_act = cfg.fwd_cvtfp32bf16_relu_kernel;
libxsmm_meltw_cvtfp32bf16_act_param eltwise_params_act;
libxsmm_meltwfunction_cvtfp32bf16 eltwise_kernel = cfg.fwd_cvtfp32bf16_kernel;
libxsmm_meltw_cvtfp32bf16_param eltwise_params;
libxsmm_bmmfunction_reducebatch_strd_meltwfused bf16_batchreduce_kernel_zerobeta_fused_eltwise;
libxsmm_meltw_copy_param copy_params;
unsigned long long blocks = nBlocksIFm;
libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1;
if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) && ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd7;
} else if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd4;
} else if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd5;
}
BF = cfg.fwd_bf;
CB_BLOCKS = nBlocksIFm/BF;
blocks = CB_BLOCKS;
if (use_2d_blocking == 1) {
row_teams = cfg.fwd_row_teams;
column_teams = cfg.fwd_column_teams;
my_col_id = ltid % column_teams;
my_row_id = ltid / column_teams;
im_tasks_per_thread = (nBlocksMB + row_teams-1)/row_teams;
in_tasks_per_thread = (nBlocksOFm + column_teams-1)/column_teams;
my_im_start = LIBXSMM_MIN( my_row_id * im_tasks_per_thread, nBlocksMB);
my_im_end = LIBXSMM_MIN( (my_row_id+1) * im_tasks_per_thread, nBlocksMB);
my_in_start = LIBXSMM_MIN( my_col_id * in_tasks_per_thread, nBlocksOFm);
my_in_end = LIBXSMM_MIN( (my_col_id+1) * in_tasks_per_thread, nBlocksOFm);
}
const libxsmm_blasint ofm_start = numa_thr_cfg->blocksOFm_s[layer];
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, ltid);
cfg.fwd_config_kernel(NULL, NULL, NULL);
if (use_2d_blocking == 1) {
if (BF > 1) {
for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
for (ofm1 = my_in_start; ofm1 < my_in_end; ++ofm1) {
for (mb1 = my_im_start; mb1 < my_im_end; ++mb1) {
if ( ifm1 == 0 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
copy_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
copy_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
cfg.fwd_colbcast_bf16fp32_copy_kernel(©_params);
} else {
copy_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
cfg.fwd_zero_kernel(©_params);
}
}
cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
if ( ifm1 == BF-1 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
eltwise_params_act.in_ptr = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params_act.out_ptr = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params_act.actstore_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
eltwise_kernel_act(&eltwise_params_act);
} else {
eltwise_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_kernel(&eltwise_params);
}
}
}
}
}
} else {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
copy_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk);
copy_params.out_ptr = fp32_bias_scratch;
cfg.fwd_copy_bf16fp32_kernel(©_params);
}
for (ofm1 = my_in_start; ofm1 < my_in_end; ++ofm1) {
for (mb1 = my_im_start; mb1 < my_im_end; ++mb1) {
if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk;
}
if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
}
bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
} else {
cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
}
}
}
}
} else {
if (BF > 1) {
for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
mb1 = mb1ofm1%nBlocksMB;
ofm1 = mb1ofm1/nBlocksMB;
/* Initialize libxsmm_blasintermediate f32 tensor */
if ( ifm1 == 0 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
copy_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
copy_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
cfg.fwd_colbcast_bf16fp32_copy_kernel(©_params);
} else {
copy_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
cfg.fwd_zero_kernel(©_params);
}
}
cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
if ( ifm1 == BF-1 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
eltwise_params_act.in_ptr = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params_act.out_ptr = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params_act.actstore_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
eltwise_kernel_act(&eltwise_params_act);
} else {
eltwise_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_kernel(&eltwise_params);
}
}
}
}
} else {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
copy_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk);
copy_params.out_ptr = fp32_bias_scratch;
cfg.fwd_copy_bf16fp32_kernel(©_params);
}
for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
mb1 = mb1ofm1%nBlocksMB;
ofm1 = mb1ofm1/nBlocksMB;
if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk;
}
if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
}
bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
} else {
cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
}
}
}
}
cfg.tilerelease_kernel(NULL, NULL, NULL);
libxsmm_barrier_wait(cfg.barrier, ltid);
}
/* Backward/update pass of a bf16 fully-connected layer.
 * Depending on `pass`, computes:
 *   - MY_PASS_BWD_D: dinput  = doutput * W^T   (via VNNI-transposed filter)
 *   - MY_PASS_BWD_W: dfilter = doutput^T * input (via upfront activation transposes)
 * Optional fusions (cfg.fuse_type): ReLU backward applied to doutput using the
 * stored bitmask, and bias gradient reduction into dbias_ptr.
 * All heavy lifting is done by pre-generated libxsmm JIT kernels held in cfg;
 * work is partitioned across cfg.threads logical threads and synchronized with
 * cfg.barrier. `scratch` provides thread-shared temporaries (transposed tensors,
 * f32 accumulators); its layout is assumed to match what cfg's setup allocated.
 *
 * NOTE(review): the only code change vs. the previous revision is repairing the
 * four corrupted kernel invocations `(©_params)` -> `(&copy_params)`
 * (an HTML-entity mangling of `&copy_params`); all other statements are
 * unchanged. */
void my_fc_bwd_exec( my_fc_bwd_config cfg, const libxsmm_bfloat16* wt_ptr, libxsmm_bfloat16* din_act_ptr,
                     const libxsmm_bfloat16* dout_act_ptr, libxsmm_bfloat16* dwt_ptr, const libxsmm_bfloat16* in_act_ptr,
                     libxsmm_bfloat16* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch ) {
  /* size variables, all const */
  /* here we assume that input and output blocking is similar */
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint lpb = 2; /* low-precision pairing factor for VNNI layout (2 bf16 per dword) */
  const libxsmm_blasint bc_lp = bc/lpb;
  const libxsmm_blasint bk_lp = bk/lpb;
  const libxsmm_blasint bn_lp = bn/lpb;
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB  = cfg.N / cfg.bn;
  libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, mb2 = 0, ofm2 = 0;
  libxsmm_blasint iteri = 0, iterj = 0;
  libxsmm_blasint performed_doutput_transpose = 0; /* set when the ReLU pass already produced doutput_tr */
  libxsmm_meltw_transform_param trans_param;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work;
  const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work;
  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint dbias_work = nBlocksOFm;
  /* compute chunk size */
  const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work;
  const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work;
  LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dbias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) dbias_ptr : NULL, cfg.bk);
  LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32);
#ifdef OVERWRITE_DOUTPUT_BWDUPD
  /* ReLU-masked gradient is written back in place over dout_act_ptr */
  libxsmm_bfloat16 *grad_output_ptr = (libxsmm_bfloat16*)dout_act_ptr;
  libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)scratch;
#else
  /* ReLU-masked gradient goes to scratch so dout_act_ptr stays untouched */
  libxsmm_bfloat16 *grad_output_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)dout_act_ptr;
  libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)grad_output_ptr + cfg.N * cfg.K : (libxsmm_bfloat16*)scratch;
#endif
  LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, doutput_orig, (libxsmm_bfloat16*)dout_act_ptr, nBlocksOFm, bn, bk);
  libxsmm_meltw_relu_param relu_params;
  libxsmm_meltwfunction_relu relu_kernel = cfg.bwd_relu_kernel;
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, doutput, grad_output_ptr, nBlocksOFm, bn, bk);
  LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, doutput_tr, tr_doutput_ptr, nBlocksMB, bn_lp, bk, lpb);
  libxsmm_meltwfunction_cvtfp32bf16 eltwise_kernel = cfg.bwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_cvtfp32bf16 eltwise_kernel2 = cfg.upd_cvtfp32bf16_kernel;
  libxsmm_meltw_cvtfp32bf16_param eltwise_params;
  libxsmm_meltw_copy_param copy_params;
  libxsmm_meltw_reduce_param delbias_params;
  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);
  cfg.bwd_config_kernel(NULL, NULL, NULL);
  /* Apply to doutput potential fusions */
  if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
    for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) {
      mb1 = mb1ofm1/nBlocksOFm;
      ofm1 = mb1ofm1%nBlocksOFm;
      relu_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
      relu_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
      relu_params.mask_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
      relu_kernel(&relu_params);
      /* If in UPD pass, also perform transpose of doutput */
      if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
        trans_param.in_ptr = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk);
        trans_param.out_ptr = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb);
        cfg.norm_to_vnni_kernel(&trans_param);
      }
    }
    if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
      performed_doutput_transpose = 1;
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }
  /* Accumulation of bias happens in f32 */
  if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) {
    for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) {
      delbias_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
      delbias_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk);
      cfg.delbias_reduce_kernel(&delbias_params);
    }
    /* wait for eltwise to finish */
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }
  if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ){
    libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking;
    /* number of tasks that could be run in parallel */
    const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
    /* compute chunk size */
    const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
    const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
    /* number of tasks for transpose that could be run in parallel */
    const libxsmm_blasint transpose_work = nBlocksIFm * nBlocksOFm;
    /* compute chunk size */
    const libxsmm_blasint transpose_chunksize = (transpose_work % cfg.threads == 0) ? (transpose_work / cfg.threads) : ((transpose_work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work;
    const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? ((ltid + 1) * transpose_chunksize) : transpose_work;
    /* loop variables */
    libxsmm_blasint ifm1 = 0, ifm2 = 0, ifm1ofm1 = 0, mb1ifm1 = 0;
    libxsmm_blasint im_tasks_per_thread = 0, in_tasks_per_thread = 0, my_in_start = 0, my_in_end = 0, my_im_start = 0, my_im_end = 0, my_row_id = 0, my_col_id = 0, row_teams = 0, column_teams = 0;
    LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb);
    LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16* )din_act_ptr, nBlocksIFm, bn, bc);
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter_tr, (libxsmm_bfloat16*)scratch, nBlocksOFm, bk_lp, bc, lpb);
    /* f32 accumulator lives in scratch right after the transposed filter (C*K bf16 = (C*K)/2 floats) */
    float* temp_output = (float*)scratch + (cfg.C * cfg.K)/2;
    LIBXSMM_VLA_DECL(4, float, dinput_f32, (float*) temp_output, nBlocksIFm, bn, bc);
    unsigned long long blocks = nBlocksOFm;
    libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1;
    BF = cfg.bwd_bf;
    KB_BLOCKS = nBlocksOFm/BF;
    blocks = KB_BLOCKS;
    if (use_2d_blocking == 1) {
      row_teams = cfg.bwd_row_teams;
      column_teams = cfg.bwd_column_teams;
      my_col_id = ltid % column_teams;
      my_row_id = ltid / column_teams;
      im_tasks_per_thread = (nBlocksMB + row_teams-1)/row_teams;
      in_tasks_per_thread = (nBlocksIFm + column_teams-1)/column_teams;
      my_im_start = LIBXSMM_MIN( my_row_id * im_tasks_per_thread, nBlocksMB);
      my_im_end = LIBXSMM_MIN( (my_row_id+1) * im_tasks_per_thread, nBlocksMB);
      my_in_start = LIBXSMM_MIN( my_col_id * in_tasks_per_thread, nBlocksIFm);
      my_in_end = LIBXSMM_MIN( (my_col_id+1) * in_tasks_per_thread, nBlocksIFm);
    }
    /* transpose weight */
    for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) {
      ofm1 = ifm1ofm1 / nBlocksIFm;
      ifm1 = ifm1ofm1 % nBlocksIFm;
      trans_param.in_ptr = &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
      trans_param.out_ptr = &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb);
      cfg.vnni_to_vnniT_kernel(&trans_param);
    }
    /* wait for transpose to finish */
    libxsmm_barrier_wait(cfg.barrier, ltid);
    if (use_2d_blocking == 1) {
      if (BF > 1) {
        /* blocked-K accumulation in f32, downconvert on last K-block */
        for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
          for (ifm1 = my_in_start; ifm1 < my_in_end; ++ifm1) {
            for (mb1 = my_im_start; mb1 < my_im_end; ++mb1) {
              /* Initialize libxsmm_blasintermediate f32 tensor */
              if ( ofm1 == 0 ) {
                copy_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                cfg.bwd_zero_kernel(&copy_params);
              }
              cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
                  &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
                  &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
              /* downconvert libxsmm_blasintermediate f32 tensor to bf 16 and store to final C */
              if ( ofm1 == BF-1 ) {
                eltwise_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                eltwise_kernel(&eltwise_params);
              }
            }
          }
        }
      } else {
        /* single shot over all K blocks, bf16 output written directly */
        for (ifm1 = my_in_start; ifm1 < my_in_end; ++ifm1) {
          for (mb1 = my_im_start; mb1 < my_im_end; ++mb1) {
            cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
                &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
                &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
          }
        }
      }
    } else {
      if (BF > 1) {
        for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
          for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) {
            mb1 = mb1ifm1%nBlocksMB;
            ifm1 = mb1ifm1/nBlocksMB;
            /* Initialize libxsmm_blasintermediate f32 tensor */
            if ( ofm1 == 0 ) {
              copy_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
              cfg.bwd_zero_kernel(&copy_params);
            }
            cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
                &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
                &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
            /* downconvert libxsmm_blasintermediate f32 tensor to bf 16 and store to final C */
            if ( ofm1 == BF-1 ) {
              eltwise_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
              eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
              eltwise_kernel(&eltwise_params);
            }
          }
        }
      } else {
        for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) {
          mb1 = mb1ifm1%nBlocksMB;
          ifm1 = mb1ifm1/nBlocksMB;
          cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
              &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
              &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
        }
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }
  if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
    /* number of tasks that could be run in parallel */
    const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks;
    const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks;
    const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks;
    const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? bc : bc/ifm_subtasks;
    const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks;
    const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks;
    const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks;
    /* 2D blocking parameters */
    libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking;
    libxsmm_blasint im_tasks_per_thread = 0, in_tasks_per_thread = 0, my_in_start = 0, my_in_end = 0, my_im_start = 0, my_im_end = 0, my_row_id = 0, my_col_id = 0, row_teams = 0, column_teams = 0;
    /* compute chunk size */
    const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
    const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
    libxsmm_blasint BF = cfg.upd_bf;
    /* loop variables */
    libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, bfn = 0, ii = 0, jj = 0, mb1ifm1 = 0, jc = 0, jk = 0;
    /* Batch reduce related variables */
    unsigned long long blocks = nBlocksMB/BF;
    LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, (libxsmm_bfloat16* )in_act_ptr, nBlocksIFm, bn, bc);
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16*)dwt_ptr, nBlocksIFm, bc_lp, bk, lpb);
    /* Set up tensors for transposing/scratch before vnni reformatting dfilter */
    libxsmm_bfloat16 *tr_inp_ptr = (libxsmm_bfloat16*) ((libxsmm_bfloat16*)scratch + cfg.N * cfg.K);
    float *dfilter_f32_ptr = (float*) ((libxsmm_bfloat16*)tr_inp_ptr + cfg.N * cfg.C);
    libxsmm_bfloat16 *dfilter_scratch = (libxsmm_bfloat16*) ((float*)dfilter_f32_ptr + cfg.C * cfg.K) + ltid * bc * bk;
    LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, input_tr, (libxsmm_bfloat16*)tr_inp_ptr, nBlocksMB, bc, bn);
    LIBXSMM_VLA_DECL(4, float, dfilter_f32, (float*)dfilter_f32_ptr, nBlocksIFm, bc, bk);
    LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dfilter_block, (libxsmm_bfloat16*)dfilter_scratch, bk);
    const libxsmm_blasint tr_out_work = nBlocksMB * nBlocksOFm;
    const libxsmm_blasint tr_out_chunksize = (tr_out_work % cfg.threads == 0) ? (tr_out_work / cfg.threads) : ((tr_out_work / cfg.threads) + 1);
    const libxsmm_blasint tr_out_thr_begin = (ltid * tr_out_chunksize < tr_out_work) ? (ltid * tr_out_chunksize) : tr_out_work;
    const libxsmm_blasint tr_out_thr_end = ((ltid + 1) * tr_out_chunksize < tr_out_work) ? ((ltid + 1) * tr_out_chunksize) : tr_out_work;
    const libxsmm_blasint tr_inp_work = nBlocksMB * nBlocksIFm;
    const libxsmm_blasint tr_inp_chunksize = (tr_inp_work % cfg.threads == 0) ? (tr_inp_work / cfg.threads) : ((tr_inp_work / cfg.threads) + 1);
    const libxsmm_blasint tr_inp_thr_begin = (ltid * tr_inp_chunksize < tr_inp_work) ? (ltid * tr_inp_chunksize) : tr_inp_work;
    const libxsmm_blasint tr_inp_thr_end = ((ltid + 1) * tr_inp_chunksize < tr_inp_work) ? ((ltid + 1) * tr_inp_chunksize) : tr_inp_work;
    /* These are used for the vnni reformatting of the f32 output */
    /* NOTE(review): a01/b01/c01/perm_index appear unused in this revision (the
     * downconvert is handled by eltwise_kernel2); kept for ABI/history — confirm
     * before removing. */
    __m512 a01, b01;
    __m512i c01 = LIBXSMM_INTRINSICS_MM512_UNDEFINED_EPI32();
    const __m512i perm_index = LIBXSMM_INTRINSICS_MM512_SET_EPI16(31, 15, 30, 14, 29, 13, 28, 12, 27, 11, 26, 10, 25, 9, 24, 8, 23, 7, 22, 6, 21, 5, 20, 4, 19, 3, 18, 2, 17, 1, 16, 0);
    if (use_2d_blocking == 1) {
      row_teams = cfg.upd_row_teams;
      column_teams = cfg.upd_column_teams;
      my_col_id = ltid % column_teams;
      my_row_id = ltid / column_teams;
      im_tasks_per_thread = (nBlocksIFm + row_teams-1)/row_teams;
      in_tasks_per_thread = (nBlocksOFm + column_teams-1)/column_teams;
      my_im_start = LIBXSMM_MIN( my_row_id * im_tasks_per_thread, nBlocksIFm);
      my_im_end = LIBXSMM_MIN( (my_row_id+1) * im_tasks_per_thread, nBlocksIFm);
      my_in_start = LIBXSMM_MIN( my_col_id * in_tasks_per_thread, nBlocksOFm);
      my_in_end = LIBXSMM_MIN( (my_col_id+1) * in_tasks_per_thread, nBlocksOFm);
    }
    /* Required upfront tranposes */
    for (mb1ifm1 = tr_inp_thr_begin; mb1ifm1 < tr_inp_thr_end; mb1ifm1++) {
      mb1 = mb1ifm1%nBlocksMB;
      ifm1 = mb1ifm1/nBlocksMB;
      trans_param.in_ptr = &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
      trans_param.out_ptr = &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, mb1, 0, 0, nBlocksMB, bc, bn);
      cfg.norm_to_normT_kernel(&trans_param);
    }
    /* doutput transpose may already have been fused into the ReLU pass above */
    if (performed_doutput_transpose == 0) {
      for (mb1ofm1 = tr_out_thr_begin; mb1ofm1 < tr_out_thr_end; mb1ofm1++) {
        mb1 = mb1ofm1%nBlocksMB;
        ofm1 = mb1ofm1/nBlocksMB;
        trans_param.in_ptr = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk);
        trans_param.out_ptr = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb);
        cfg.norm_to_vnni_kernel(&trans_param);
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
    if (use_2d_blocking == 1) {
      ifm2 = 0;
      ofm2 = 0;
      if (BF == 1) {
        for (ofm1 = my_in_start; ofm1 < my_in_end; ++ofm1) {
          for (ifm1 = my_im_start; ifm1 < my_im_end; ++ifm1) {
            cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks);
          }
        }
      } else {
        for (bfn = 0; bfn < BF; bfn++) {
          for (ofm1 = my_in_start; ofm1 < my_in_end; ++ofm1) {
            for (ifm1 = my_im_start; ifm1 < my_im_end; ++ifm1) {
              /* initialize current work task to zero */
              if (bfn == 0) {
                copy_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
                cfg.upd_zero_kernel(&copy_params);
              }
              cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
              /* Downconvert result to BF16 and vnni format */
              if (bfn == BF-1) {
                eltwise_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk);
                eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
                eltwise_kernel2(&eltwise_params);
              }
            }
          }
        }
      }
    } else {
      if (BF == 1) {
        for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
          ofm1 = ifm1ofm1 / Cck_work;
          ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
          ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
          ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
          cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks);
        }
      } else {
        for (bfn = 0; bfn < BF; bfn++) {
          for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
            ofm1 = ifm1ofm1 / Cck_work;
            ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
            ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
            ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
            /* initialize current work task to zero */
            if (bfn == 0) {
              copy_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
              cfg.upd_zero_kernel(&copy_params);
            }
            cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
            /* Downconvert result to BF16 and vnni format */
            if (bfn == BF-1) {
              eltwise_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
              eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb);
              eltwise_kernel2(&eltwise_params);
            }
          }
        }
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }
  cfg.tilerelease_kernel(NULL, NULL, NULL);
}
/* Plain SGD weight update for a bf16 weight tensor that is shadowed by an fp32
 * "master" copy: master_wt -= lr * delwt, then wt = bf16(master_wt).
 * The flat C*K element range is split into equal chunks across cfg.threads
 * workers (ltid selects this thread's [thr_begin, thr_end) slice) and the
 * update is synchronized with cfg.barrier. `scratch` is unused here.
 * NOTE(review): _mm512_load_fil / _mm512_store_fil are project macros —
 * presumably bf16<->f32 vector load/store; confirm their definitions. */
void my_opt_exec( my_opt_config cfg, libxsmm_bfloat16* wt_ptr, float* master_wt_ptr, const libxsmm_bfloat16* delwt_ptr, int start_tid, int my_tid, void* scratch ) {
  /* loop counters */
  libxsmm_blasint i;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could run in parallel for the filters */
  const libxsmm_blasint work = cfg.C * cfg.K;
  /* compute chunk size (ceiling division so all elements are covered) */
  const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
  /* compute thr_begin and thr_end (clamped to `work` for trailing threads) */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );
#if defined(__AVX512BW__)
  libxsmm_blasint iv = ( (thr_end-thr_begin)/16 ) * 16; /* compute iterations which are vectorizable */
  __m512 vlr = _mm512_set1_ps( cfg.lr ); /* broadcast learning rate into all 16 lanes */
  /* vector body: 16 weights per iteration */
  for ( i = thr_begin; i < thr_begin+iv; i+=16 ) {
    __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( master_wt_ptr+i ), _mm512_mul_ps( vlr, _mm512_load_fil( delwt_ptr + i ) ) );
    _mm512_store_fil( wt_ptr+i, newfilter );      /* bf16 copy for compute */
    _mm512_storeu_ps( master_wt_ptr+i, newfilter ); /* fp32 master copy */
  }
  /* scalar tail: remaining (thr_end-thr_begin) % 16 elements */
  for ( i = thr_begin+iv; i < thr_end; ++i ) {
    libxsmm_bfloat16_hp t1, t2;
    /* bf16 -> f32: place bf16 bits in the high 16 bits of the f32 pattern
     * (assumes little-endian layout of the libxsmm_bfloat16_hp union) */
    t1.i[0] =0;
    t1.i[1] = delwt_ptr[i];
    master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f);
    /* f32 -> bf16 by truncation: keep the high 16 bits */
    t2.f = master_wt_ptr[i];
    wt_ptr[i] = t2.i[1];
  }
#else
  /* portable scalar path: identical update without AVX-512 */
  for ( i = thr_begin; i < thr_end; ++i ) {
    libxsmm_bfloat16_hp t1, t2;
    t1.i[0] =0;
    t1.i[1] = delwt_ptr[i];
    master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f);
    t2.f = master_wt_ptr[i];
    wt_ptr[i] = t2.i[1];
  }
#endif
  libxsmm_barrier_wait( cfg.barrier, ltid );
}
/* Forward softmax + cross-entropy loss over bf16 activations.
 * Steps: (1) upconvert the full N*C input to f32 scratch, (2) per sample,
 * compute a numerically-stabilized softmax (shift by row max, exp, normalize),
 * (3) single-threaded negative-log-likelihood loss against label_ptr,
 * (4) downconvert the f32 softmax output back to bf16 into out_act_ptr.
 * Scratch layout: [0, N*C) f32 output, [N*C, 2*N*C) f32 input copy.
 *
 * Fixes vs. previous revision:
 *  - nc_work was Bn*bn (N elements) while the softmax reads/writes N*C
 *    elements; conversion now covers Bn*bn*Bc*bc.
 *  - row max was initialized with FLT_MIN (smallest positive float), which
 *    never triggers for all-negative rows; use -FLT_MAX as the max identity.
 *  - loss overwrote instead of accumulating per sample; the final division by
 *    cfg.N shows a sum over all samples is intended -> use `+=`. */
void my_smax_fwd_exec( my_smax_fwd_config cfg, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr, const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) {
  libxsmm_blasint bn = cfg.bn;
  libxsmm_blasint Bn = cfg.N/cfg.bn;
  libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint Bc = cfg.C/cfg.bc;
  /* loop counters */
  libxsmm_blasint i = 0;
  libxsmm_blasint img1, img2, ifm1, ifm2;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could run in parallel for the batch (one per sample row) */
  const libxsmm_blasint n_work = Bn * bn;
  /* compute chunk size */
  const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
  const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work;
  /* number of elementwise conversion tasks: the FULL N*C activation buffer */
  const libxsmm_blasint nc_work = Bn * bn * Bc * bc;
  /* compute chunk size */
  const libxsmm_blasint nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work;
  const libxsmm_blasint nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work;
  libxsmm_bfloat16* poutput_bf16 = out_act_ptr;
  const libxsmm_bfloat16* pinput_bf16 = in_act_ptr;
  float* poutput_fp32 = (float*)scratch;
  float* pinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C);
  LIBXSMM_VLA_DECL(4, float, output, poutput_fp32, Bc, bn, bc);
  LIBXSMM_VLA_DECL(4, const float, input, pinput_fp32, Bc, bn, bc);
  LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn);
  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );
  /* upconvert this thread's slice of the bf16 input to f32 scratch */
#if defined(__AVX512BW__)
  LIBXSMM_DNN_CONVERT_BUFFER_BF16_F32(pinput_bf16+nc_thr_begin, pinput_fp32+nc_thr_begin, nc_thr_end-nc_thr_begin);
#else
  for ( i = nc_thr_begin; i < nc_thr_end; ++i ) {
    libxsmm_bfloat16_hp in;
    in.i[0] = 0;
    in.i[1] = pinput_bf16[i];
    pinput_fp32[i] = in.f;
  }
#endif
  libxsmm_barrier_wait( cfg.barrier, ltid );
  for ( i = n_thr_begin; i < n_thr_end; ++i ) {
    float max = -FLT_MAX; /* max identity; FLT_MIN would miss all-negative rows */
    float sum_of_exp = 0.0f;
    img1 = i/bn;
    img2 = i%bn;
    /* set output to input and set compute max per image */
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
        if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) {
          max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
        }
      }
    }
    /* sum exp over outputs (shifted by max for numerical stability) */
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) );
        sum_of_exp += LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc );
      }
    }
    /* scale output */
    sum_of_exp = 1.0f/sum_of_exp;
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp;
      }
    }
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
  /* calculate loss single threaded */
  if ( ltid == 0 ) {
    (*loss) = 0.0f;
    for ( img1 = 0; img1 < Bn; ++img1 ) {
      for ( img2 = 0; img2 < bn; ++img2 ) {
        libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn );
        libxsmm_blasint ifm1b = ifm/bc;
        libxsmm_blasint ifm2b = ifm%bc;
        /* clamp to FLT_MIN to avoid log(0) */
        float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ? LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN;
        /* accumulate per-sample log-probability (was `=`: kept only the last sample) */
        *loss += LIBXSMM_LOGF( val );
      }
    }
    /* mean negative log-likelihood over the batch */
    *loss = ((-1.0f)*(*loss))/cfg.N;
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
  /* downconvert this thread's slice of the f32 softmax output back to bf16 */
#if defined(__AVX512BW__)
  LIBXSMM_DNN_CONVERT_BUFFER_F32_BF16(poutput_fp32+nc_thr_begin, poutput_bf16+nc_thr_begin, nc_thr_end-nc_thr_begin);
#else
  for ( i = nc_thr_begin; i < nc_thr_end; ++i ) {
    libxsmm_bfloat16_hp in;
    in.f = poutput_fp32[i];
    poutput_bf16[i] = in.i[1];
  }
#endif
  libxsmm_barrier_wait( cfg.barrier, ltid );
}
/*
 * Softmax backward pass (bf16 in/out, fp32 compute in scratch).
 * dX = (softmax(X) - onehot(label)) * loss_weight / N, computed per (image, feature) cell.
 *
 * cfg            softmax-bwd configuration (blocking, thread count, barrier, loss_weight)
 * delin_act_ptr  [out] input-gradient, bf16, N*C elements
 * out_act_ptr    [in]  softmax output activations, bf16, N*C elements
 * label_ptr      [in]  per-sample class labels, int, N elements
 * start_tid/my_tid  first logical tid of this team / this thread's tid
 * scratch        fp32 staging area: [0, N*C) holds output, [N*C, 2*N*C) holds dinput
 */
void my_smax_bwd_exec( my_smax_bwd_config cfg, libxsmm_bfloat16* delin_act_ptr, const libxsmm_bfloat16* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) {
  libxsmm_blasint bn = cfg.bn;
  libxsmm_blasint Bn = cfg.N/cfg.bn;
  libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint Bc = cfg.C/cfg.bc;
  /* loop counters */
  libxsmm_blasint i = 0;
  libxsmm_blasint img1, img2, ifm1, ifm2;
  float rcp_N = 1.0f/cfg.N;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could run in parallel for the batch */
  const libxsmm_blasint n_work = Bn * bn;
  /* compute chunk size */
  const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
  const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work;
  /* number of tasks that could run in parallel for the batch.
     NOTE(review): this covers Bn*bn (= N) elements, yet the bf16<->fp32 conversion
     loops below use it to walk buffers that hold N*C values -- confirm whether this
     should be N*C work items (the fwd kernel uses the same pattern). */
  const int nc_work = Bn * bn;
  /* compute chunk size */
  const int nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const int nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work;
  const int nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work;
  const libxsmm_bfloat16* poutput_bf16 = out_act_ptr;
  libxsmm_bfloat16* pdinput_bf16 = delin_act_ptr;
  float* poutput_fp32 = (float*)scratch;
  float* pdinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C);
  LIBXSMM_VLA_DECL(4, const float, output, poutput_fp32, Bc, bn, bc);
  LIBXSMM_VLA_DECL(4, float, dinput, pdinput_fp32, Bc, bn, bc);
  LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn);
  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );
  /* up-convert the bf16 activations to fp32 scratch */
#if defined(__AVX512BW__)
  LIBXSMM_DNN_CONVERT_BUFFER_BF16_F32(poutput_bf16+nc_thr_begin, poutput_fp32+nc_thr_begin, nc_thr_end-nc_thr_begin);
#else
  for ( i = nc_thr_begin; i < nc_thr_end; ++i ) {
    libxsmm_bfloat16_hp out;
    out.i[0] = 0;
    out.i[1] = poutput_bf16[i];
    poutput_fp32[i] = out.f;
  }
#endif
  libxsmm_barrier_wait( cfg.barrier, ltid );
  for ( i = n_thr_begin; i < n_thr_end; ++i ) {
    img1 = i/bn;
    img2 = i%bn;
    /* gradient: softmax(x) - onehot(label), scaled by loss_weight/N */
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        /* The flattened feature index of (block ifm1, in-block ifm2) is ifm1*bc + ifm2:
           the fwd pass decodes labels as ifm/bc and ifm%bc. The original code compared
           against ifm1*Bc + ifm2, which is wrong whenever Bc != bc. */
        if ( (ifm1*bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) {
          LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) =
            ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 1.0f ) * rcp_N * cfg.loss_weight;
        } else {
          LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) =
            LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * rcp_N * cfg.loss_weight;
        }
      }
    }
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
  /* down-convert the fp32 gradient back to bf16 */
#if defined(__AVX512BW__)
  LIBXSMM_DNN_CONVERT_BUFFER_F32_BF16(pdinput_fp32+nc_thr_begin, pdinput_bf16+nc_thr_begin, nc_thr_end-nc_thr_begin);
#else
  for ( i = nc_thr_begin; i < nc_thr_end; ++i ) {
    libxsmm_bfloat16_hp in;
    in.f = pdinput_fp32[i];
    pdinput_bf16[i] = in.i[1]; /* truncation-style fp32->bf16: keep the high 16 bits */
  }
#endif
  libxsmm_barrier_wait( cfg.barrier, ltid );
}
/*
 * Allocate `size` bytes on NUMA node `numa_node`.
 *
 * The `#if 0` branch implements manual over-allocate-and-align with the original
 * offset stashed just before the returned pointer (paired with numa_free_aligned);
 * it is currently disabled, so `alignment_` is ignored and allocation falls
 * straight through to numa_alloc_onnode (which is page-aligned already).
 * Returns NULL on allocation failure.
 */
void *numa_alloc_onnode_aligned(size_t size, int numa_node, int alignment_) {
#if 0
  int alignment = alignment_ - 1;
  size_t adj_size = sizeof(size_t) + alignment;
  void *r_ptr = NULL;
  void *t_ptr = numa_alloc_onnode(size + adj_size, numa_node);
  if (t_ptr == NULL) return NULL;
  r_ptr = (void *)(((size_t)t_ptr + adj_size) & ~alignment);
  *((size_t*)r_ptr - 1) = (size_t)r_ptr - (size_t)t_ptr;
  return r_ptr;
#else
  (void)alignment_; /* alignment path disabled above; silence unused-parameter warning */
  return numa_alloc_onnode(size, numa_node);
#endif
}
/*
 * Release memory obtained from numa_alloc_onnode_aligned.
 * The #if 0 branch undoes the manual alignment bookkeeping (offset stored just
 * before `ptr`); it must stay in sync with the matching branch in the allocator.
 * With that path disabled, this is a plain numa_free(ptr, size).
 */
void numa_free_aligned(void *ptr, size_t size) {
#if 0
if (ptr == NULL) return;
void *t_ptr = (void*)((size_t*)ptr - *((size_t*)ptr - 1));
numa_free(t_ptr, size);
#else
numa_free(ptr, size);
#endif
}
/*
 * Discover the NUMA topology and build one my_numa_thr_cfg per configured node:
 * per-layer scratch/size/block-range arrays plus the [thr_s, thr_e] range of
 * worker thread ids whose CPUs belong to that node.
 * Returns 1 on success, -1 on allocation failure; result via *numa_thr_cfg_.
 */
int setup_my_numa(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, int n_threads) {
  int max_nodes = numa_max_node() + 1;
  int max_cfg_nodes = numa_num_configured_nodes();
  int max_cfg_cpus = numa_num_configured_cpus();
  int max_task_cpus = numa_num_task_cpus();
  my_numa_thr_cfg *numa_thr_cfg = (my_numa_thr_cfg *) malloc(sizeof(my_numa_thr_cfg) * max_cfg_nodes);
  if (numa_thr_cfg == NULL) return -1;
  printf("FWD NUMA configuration:\n");
  printf("There are %d numa nodes on the system\n", max_nodes);
  printf("There are %d configured numa nodes on the system\n", max_cfg_nodes);
  printf("There are %d configured CPUs on the system\n", max_cfg_cpus);
  printf("There are %d CPUs asigned for the current task\n", max_task_cpus);
  struct bitmask* bmask = numa_bitmask_alloc(max_cfg_cpus);
  int thr_count = 0, i = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    numa_node_to_cpus(i, bmask);
    /* calloc (not malloc) so per-layer scratch pointers and sizes start out
       NULL/0: layers that never get a buffer allocated can then be detected
       and skipped safely at cleanup time. */
    numa_thr_cfg[i].scratch = (libxsmm_bfloat16**) calloc(num_layers, sizeof(libxsmm_bfloat16*));
    numa_thr_cfg[i].layer_size = (size_t*) calloc(num_layers, sizeof(size_t));
    numa_thr_cfg[i].blocksOFm_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_e = (int*)malloc(sizeof(int)*num_layers);
    /* Initialize the thread range: for a node with no eligible CPUs the loops
       below leave it untouched, and thr_s == thr_e marks "no threads here"
       (the consumers in setup_my_numa_fwd skip such nodes). The original code
       left both fields uninitialized in that case. */
    numa_thr_cfg[i].thr_s = 0;
    numa_thr_cfg[i].thr_e = 0;
    int num_threads_in_mask = 0;
    size_t t; /* size_t to match bmask->size and avoid signed/unsigned mismatch */
    for (t = 0; t < bmask->size; t++)
      if (numa_bitmask_isbitset(bmask, t)) num_threads_in_mask++;
    int node_threads = 0;
    /* find the first thread id whose CPU sits on this node */
    while (thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count)) {
        numa_thr_cfg[i].thr_s = thr_count;
        break;
      }
      thr_count++; node_threads++;
    }
    /* extend thr_e to the last consecutive thread id on this node */
    while (thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count))
        numa_thr_cfg[i].thr_e = thr_count;
      thr_count++; node_threads++;
    }
  }
  numa_bitmask_free(bmask); /* was leaked in the original */
  *numa_thr_cfg_ = numa_thr_cfg;
  return 1;
}
/*
 * For every NUMA node and every layer, compute the [blocksOFm_s, blocksOFm_e]
 * range of output-feature blocks touched by the threads pinned to that node,
 * mirroring the work partitioning used inside my_fc_fwd_exec (2D-blocking or
 * flat chunking). Returns 1 on success, -1 for unsupported configs (fwd_bf > 1).
 */
int setup_my_numa_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
int max_cfg_nodes = numa_num_configured_nodes();
int i = 0;
for (i = 0; i < max_cfg_nodes; i++) {
/* NOTE(review): n_thr is computed but never used below */
int n_thr = numa_thr_cfg[i].thr_e - numa_thr_cfg[i].thr_s;
int l = 0;
for (l = 0; l < num_layers; l++) {
if (my_fc_fwd[l].fwd_bf > 1) {
printf("@@@ NUMA ERROR: doesn't support this configuration\n");
return -1;
}
int thr = 0;
const libxsmm_blasint nBlocksOFm = my_fc_fwd[l].K / my_fc_fwd[l].bk;
const libxsmm_blasint nBlocksMB = my_fc_fwd[l].N / my_fc_fwd[l].bn;
if (my_fc_fwd[l].fwd_2d_blocking == 1) {
/* 2D blocking: each thread's column team owns a contiguous slice of OFm blocks */
libxsmm_blasint column_teams = my_fc_fwd[l].fwd_column_teams;
libxsmm_blasint in_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, column_teams);
/* start with an empty (inverted) range, then widen it with min/max below */
numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm;
numa_thr_cfg[i].blocksOFm_e[l] = 0;
/* NOTE(review): the `thr_s != thr_e` guard skips nodes whose range is a
   single thread (thr_s == thr_e) -- confirm that is intended and not just
   the "no threads on this node" sentinel. */
for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e
&& numa_thr_cfg[i].thr_s != numa_thr_cfg[i].thr_e; thr++) {
libxsmm_blasint my_col_id = thr % column_teams; /* ltid */
libxsmm_blasint my_in_start = LIBXSMM_MIN(my_col_id * in_tasks_per_thread, nBlocksOFm);
/* my_in_end is an EXCLUSIVE bound (one past the last block) */
libxsmm_blasint my_in_end = LIBXSMM_MIN((my_col_id+1) * in_tasks_per_thread, nBlocksOFm);
numa_thr_cfg[i].blocksOFm_s[l] = (my_in_start <= numa_thr_cfg[i].blocksOFm_s[l])
? my_in_start
: numa_thr_cfg[i].blocksOFm_s[l];
numa_thr_cfg[i].blocksOFm_e[l] = (my_in_end >= numa_thr_cfg[i].blocksOFm_e[l])
? my_in_end
: numa_thr_cfg[i].blocksOFm_e[l];
}
} else {
/* flat chunking: replicate the thr_begin/thr_end split of the fwd kernel
   and project it onto OFm block indices */
numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm;
numa_thr_cfg[i].blocksOFm_e[l] = 0;
for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e
&& numa_thr_cfg[i].thr_s != numa_thr_cfg[i].thr_e; thr++) {
const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
const libxsmm_blasint chunksize = (work % my_fc_fwd[l].threads == 0) ?
(work / my_fc_fwd[l].threads) : ((work / my_fc_fwd[l].threads) + 1);
const libxsmm_blasint thr_begin = (thr * chunksize < work) ? (thr * chunksize) : work;
const libxsmm_blasint thr_end = ((thr + 1) * chunksize < work) ? ((thr + 1) * chunksize) : work;
int ofm_s = thr_begin / nBlocksMB;
int ofm_e = thr_end / nBlocksMB;
numa_thr_cfg[i].blocksOFm_s[l] = (ofm_s <= numa_thr_cfg[i].blocksOFm_s[l])
? ofm_s
: numa_thr_cfg[i].blocksOFm_s[l];
numa_thr_cfg[i].blocksOFm_e[l] = (ofm_e >= numa_thr_cfg[i].blocksOFm_e[l])
? ofm_e
: numa_thr_cfg[i].blocksOFm_e[l];
}
}
}
}
return 1;
}
/*
 * Allocate, on each NUMA node, a private copy-buffer for the slice of every
 * layer's weights that threads of that node will read (range computed by
 * setup_my_numa_fwd). Returns 1 on success, -1 if a node allocation fails.
 * NOTE(review): for nodes/layers where the range is empty this `continue`s and
 * scratch[l]/layer_size[l] keep whatever setup_my_numa put there -- callers
 * must not free those slots unless they were initialized.
 */
int allocate_numa_buffers_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
int max_cfg_nodes = numa_num_configured_nodes();
/* NOTE(review): j and nBlocksOFm below are unused */
int i = 0, j = 0, l = 0;
for (i = 0; i < max_cfg_nodes; i++) {
for (l = 0; l < num_layers; l++) {
const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
const libxsmm_blasint nBlocksOFm = my_fc_fwd[l].K / my_fc_fwd[l].bk;
/* bytes per OFm block: all IFm blocks x bc x bk elements */
const libxsmm_blasint BOFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
/* NOTE(review): blocksOFm_e is an exclusive end in setup_my_numa_fwd, so the
   `+ 1` here looks like it over-sizes by one block -- harmless for the
   allocation, but verify against copy_to_numa_buffers_fwd_inf. */
int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
if (l_nBlocksOFm <= 0)
continue;
numa_thr_cfg[i].layer_size[l] = sizeof(libxsmm_bfloat16) * ((l_nBlocksOFm) * BOFM_shift);
numa_thr_cfg[i].scratch[l] = (libxsmm_bfloat16*)numa_alloc_onnode_aligned(numa_thr_cfg[i].layer_size[l], i, 2097152);
if (numa_thr_cfg[i].scratch[l] == NULL) {
printf("@@@ NUMA ERROR: cannot allocate on node #%d\n", i);
return -1;
}
}
}
return 1;
}
/*
 * Replicate each layer's weight slice from the global fil_libxsmm buffers into
 * the per-node scratch allocated by allocate_numa_buffers_fwd, parallelized
 * over (node, layer) pairs. Always returns 1.
 */
int copy_to_numa_buffers_fwd_inf(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd, libxsmm_bfloat16 **fil_libxsmm) {
my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
int max_cfg_nodes = numa_num_configured_nodes();
int i, l;
#pragma omp parallel for collapse(2) private (i,l)
for (i = 0; i < max_cfg_nodes; i++) {
for (l = 0; l < num_layers; l++) {
const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
/* elements per OFm block (all IFm blocks x bc x bk) */
const libxsmm_blasint BOFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
/* NOTE(review): blocksOFm_e is exclusive (see setup_my_numa_fwd), so the +1
   appears to copy one block past the node's range -- confirm the source
   buffer is large enough for the last node's read. */
int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
int j = 0;
for (j = 0; j < l_nBlocksOFm ; j++) {
size_t l_BOFM_shift = j * BOFM_shift;
libxsmm_bfloat16 *out = numa_thr_cfg[i].scratch[l] + l_BOFM_shift;
libxsmm_bfloat16 *inp = fil_libxsmm[l] + numa_thr_cfg[i].blocksOFm_s[l] * BOFM_shift + l_BOFM_shift;
memcpy(out, inp, sizeof(libxsmm_bfloat16) * nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk);
}
}
}
return 1;
}
/*
 * MLP benchmark driver (bf16): parses the layer configuration from the command
 * line, allocates activations/weights/gradients, sets up fwd/bwd/opt handles
 * and NUMA-local weight copies, then times FWD ('F'), BWD ('B') or both ('A').
 * Usage: iters MB fuse_type type bn bk bc C1 C2 ... CN
 */
int main(int argc, char* argv[])
{
  libxsmm_bfloat16 **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm;
  libxsmm_bfloat16 **bias_libxsmm, **delbias_libxsmm;
  float **fil_master;
  unsigned char **relumask_libxsmm;
  int *label_libxsmm;
  my_eltwise_fuse my_fuse;
  my_fc_fwd_config* my_fc_fwd;
  my_fc_bwd_config* my_fc_bwd;
  my_opt_config* my_opt;
  my_smax_fwd_config my_smax_fwd;
  my_smax_bwd_config my_smax_bwd;
  void* scratch = NULL;
  size_t scratch_size = 0;
#ifdef CHECK_L1
  float *last_act_fwd_f32 = NULL;
  float *first_wt_bwdupd_f32 = NULL;
#endif
  /* some parameters we can overwrite via cli,
     default is some inner layer of overfeat */
  int iters = 10; /* repetitions of benchmark */
  int MB = 32; /* mini-batch size, "N" */
  int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */
  char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */
  int bn = 64;
  int bk = 64;
  int bc = 64;
  int *C; /* number of input feature maps, "C" */
  int num_layers = 0;
  const char *const env_check = getenv("CHECK");
  const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check));
#if defined(_OPENMP)
  int nThreads = omp_get_max_threads(); /* number of threads */
#else
  int nThreads = 1; /* number of threads */
#endif
  unsigned long long l_start, l_end;
  double l_total = 0.0;
  double gflop = 0.0;
  int i, j;
  double act_size = 0.0;
  double fil_size = 0.0;
  float lr = 0.2f;
  float loss = 0;
  float loss_weight = 0.1f;
  libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff;
  libxsmm_matdiff_clear(&norms_fwd);
  libxsmm_matdiff_clear(&norms_bwd);
  libxsmm_matdiff_clear(&norms_upd);
  libxsmm_matdiff_clear(&diff);
  if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
    printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
    return 0;
  }
  libxsmm_rng_set_seed(1);
  /* reading new values from cli */
  i = 1;
  num_layers = argc - 9; /* 7 fixed args + (num_layers+1) channel counts */
  if (argc > i) iters = atoi(argv[i++]);
  if (argc > i) MB = atoi(argv[i++]);
  if (argc > i) fuse_type = atoi(argv[i++]);
  if (argc > i) type = *(argv[i++]);
  if (argc > i) bn = atoi(argv[i++]);
  if (argc > i) bk = atoi(argv[i++]);
  if (argc > i) bc = atoi(argv[i++]);
  /* allocate the number of channles buffer */
  if ( num_layers < 1 ) {
    printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
    return 0;
  }
  C = (int*)malloc((num_layers+2)*sizeof(int));
  for (j = 0 ; i < argc; ++i, ++j ) {
    C[j] = atoi(argv[i]);
  }
  /* handle softmax config: softmax layer keeps the last layer's width */
  C[num_layers+1] = C[num_layers];
  if (type != 'A' && type != 'F' && type != 'B') {
    printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n");
    return -1;
  }
  if ( (fuse_type < 0) || (fuse_type > 5) ) {
    printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n");
    return -1;
  }
#if defined(__SSE3__)
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
#endif
  /* print some summary */
  printf("##########################################\n");
  printf("#          Setting Up (Common)           #\n");
  printf("##########################################\n");
  printf("PARAMS: N:%d\n", MB);
  printf("PARAMS: Layers: %d\n", num_layers);
  printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf("  Threads:%d\n", nThreads); else printf("\n");
  for (i = 0; i < num_layers; ++i ) {
    if (i == 0) {
      act_size += (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0);
      printf("SIZE Activations  %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) );
    }
    act_size += (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0);
    fil_size += (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0);
    printf("SIZE Filter       %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) );
    printf("SIZE Activations  %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) );
  }
  /* NOTE(review): act_size accounts the softmax layer as fp32 while the print
     below reports it as bf16 -- confirm which is intended */
  act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0);
  printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) );
  printf("\nTOTAL SIZE Activations:            %10.2f MiB\n", act_size );
  printf("TOTAL SIZE Filter (incl. master):  %10.2f MiB\n", 3.0*fil_size );
  printf("TOTAL SIZE delActivations:         %10.2f MiB\n", act_size );
  printf("TOTAL SIZE delFilter:              %10.2f MiB\n", fil_size );
  printf("TOTAL SIZE MLP:                    %10.2f MiB\n", (4.0*fil_size) + (2.0*act_size) );
  /* allocate data */
  act_libxsmm    = (libxsmm_bfloat16**)malloc( (num_layers+2)*sizeof(libxsmm_bfloat16*) );
  delact_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+1)*sizeof(libxsmm_bfloat16*) );
  for ( i = 0 ; i < num_layers+2; ++i ) {
#ifdef ACT_NUMA_INTERLEAVED
    act_libxsmm[i] = (libxsmm_bfloat16*)numa_alloc_interleaved( MB*C[i]*sizeof(libxsmm_bfloat16));
#else
    act_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152);
#endif
    /* softmax has no incoming gradients */
    if ( i < num_layers+1 ) {
      delact_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152);
    }
  }
  fil_master     = (float**)           malloc( num_layers*sizeof(float*) );
  fil_libxsmm    = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
  delfil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
  for ( i = 0 ; i < num_layers; ++i ) {
    fil_master[i]     = (float*)           libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
    fil_libxsmm[i]    = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
    delfil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
  }
  bias_libxsmm    = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
  delbias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
  for ( i = 0 ; i < num_layers; ++i ) {
    bias_libxsmm[i]    = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
    delbias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
  }
  relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
  for ( i = 0 ; i < num_layers; ++i ) {
    relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
  }
  label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152);
  /* init data */
  for ( i = 0 ; i < num_layers+2; ++i ) {
    my_init_buf_bf16( act_libxsmm[i], MB*C[i], 0, 0 );
  }
  for ( i = 0 ; i < num_layers+1; ++i ) {
    my_init_buf_bf16( delact_libxsmm[i], MB*C[i], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
#if 0
    {
      float *cur_fil = (float*) malloc(C[i]*C[i+1]*sizeof(float));
      my_init_buf( cur_fil, C[i]*C[i+1], 0, 0 );
      my_matrix_copy_KCCK_to_KCCK_vnni(cur_fil, fil_master[i], C[i], C[i+1], bc, bk);
      libxsmm_rne_convert_fp32_bf16( fil_master[i], fil_libxsmm[i], C[i]*C[i+1] );
      free(cur_fil);
    }
#else
    my_init_buf( fil_master[i], C[i]*C[i+1], 0, 0 );
    libxsmm_rne_convert_fp32_bf16( fil_master[i], fil_libxsmm[i], C[i]*C[i+1] );
#endif
  }
  for ( i = 0 ; i < num_layers; ++i ) {
#if 0
    float *cur_fil = (float*) malloc(C[i]*C[i+1]*sizeof(float));
    float *cur_fil_vnni = (float*) malloc(C[i]*C[i+1]*sizeof(float));
    my_init_buf( cur_fil, C[i]*C[i+1], 0, 0 );
    my_matrix_copy_KCCK_to_KCCK_vnni(cur_fil, cur_fil_vnni, C[i], C[i+1], bc, bk);
    libxsmm_rne_convert_fp32_bf16( cur_fil_vnni, delfil_libxsmm[i], C[i]*C[i+1] );
    free(cur_fil);
    free(cur_fil_vnni);
#else
    my_init_buf_bf16( delfil_libxsmm[i], C[i]*C[i+1], 0, 0 );
#endif
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf_bf16( bias_libxsmm[i], C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf_bf16( delbias_libxsmm[i], C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] );
  }
  zero_buf_int32( label_libxsmm, MB );
  printf("\n");
  printf("##########################################\n");
  printf("#      Setting Up  (custom-Storage)      #\n");
  printf("##########################################\n");
  if ( fuse_type == 0 ) {
    my_fuse = MY_ELTWISE_FUSE_NONE;
  } else if ( fuse_type == 1 ) {
    my_fuse = MY_ELTWISE_FUSE_BIAS;
  } else if ( fuse_type == 2 ) {
    my_fuse = MY_ELTWISE_FUSE_RELU;
  } else if ( fuse_type == 4 ) {
    my_fuse = MY_ELTWISE_FUSE_BIAS_RELU;
  } else {
    /* fuse_type 3/5 (sigmoid variants) pass the CLI validation but have no
       mapping here; fall back to NONE instead of leaving my_fuse
       uninitialized (the original "cannot happen" branch set nothing). */
    my_fuse = MY_ELTWISE_FUSE_NONE;
  }
  /* allocating handles */
  my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) );
  my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) );
  my_opt    = (my_opt_config*)    malloc( num_layers*sizeof(my_opt_config)    );
  /* setting up handles + scratch */
  for ( i = 0; i < num_layers; ++i ) {
    /* fall back to the full dimension when it is not divisible by the block size */
    my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
                                   (C[i  ] % bc == 0) ? bc : C[i  ],
                                   (C[i+1] % bk == 0) ? bk : C[i+1],
                                   nThreads, my_fuse);
    my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
                                   (C[i  ] % bc == 0) ? bc : C[i  ],
                                   (C[i+1] % bk == 0) ? bk : C[i+1],
                                   nThreads, my_fuse);
    my_opt[i] = setup_my_opt( C[i], C[i+1], (C[i  ] % bc == 0) ? bc : C[i  ],
                              (C[i+1] % bk == 0) ? bk : C[i+1],
                              nThreads, lr );
    /* let's allocate and bind scratch */
    if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) {
      size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size );
      if ( alloc_size > scratch_size ) {
        if ( scratch != NULL ) libxsmm_free( scratch );
        scratch_size = alloc_size;
        scratch = libxsmm_aligned_scratch( scratch_size, 2097152 );
        my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
      }
    }
  }
  /* softmax+loss is treated as N+! layer */
  my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
                                   (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
                                   nThreads );
  my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
                                   (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
                                   nThreads, loss_weight );
  if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) {
    size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size );
    if ( alloc_size > scratch_size ) {
      if ( scratch != NULL ) libxsmm_free( scratch );
      scratch_size = alloc_size;
      scratch = libxsmm_aligned_scratch( scratch_size, 2097152 );
      my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
    }
  }
  my_numa_thr_cfg *numa_thr_cfg;
  setup_my_numa(&numa_thr_cfg, num_layers, nThreads);
  if ( type == 'F') {
    printf("##########################################\n");
    printf("#   Performance - FWD (custom-Storage)   #\n");
    printf("##########################################\n");
    setup_my_numa_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
    allocate_numa_buffers_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
    l_start = libxsmm_timer_tick();
    copy_to_numa_buffers_fwd_inf(&numa_thr_cfg, num_layers, my_fc_fwd, fil_libxsmm);
#if defined(_OPENMP)
#   pragma omp parallel private(i,j)
#endif
    {
#if defined(_OPENMP)
      const int tid = omp_get_thread_num();
#else
      const int tid = 0;
#endif
      const int numa_node = numa_node_of_cpu(tid);
      for (j = 0; j < iters; ++j) {
        for ( i = 0; i < num_layers; ++i) {
          /* read weights from the NUMA-local copy */
          libxsmm_bfloat16 *filt = numa_thr_cfg[numa_node].scratch[i];
          my_fc_fwd_exec( my_fc_fwd[i], filt, act_libxsmm[i], act_libxsmm[i+1],
                          bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, &numa_thr_cfg[numa_node], i);
        }
#ifdef USE_SOFTMAX
        my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss,
                          0, tid, scratch );
#endif
      }
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
    gflop = 0.0;
    for ( i = 0; i < num_layers; ++i) {
      gflop += (2.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
    }
    printf("GFLOP  = %.5g\n", gflop/(double)iters);
    printf("fp time = %.5g\n", ((double)(l_total/iters)));
    printf("GFLOPS  = %.5g\n", gflop/l_total);
    printf("PERFDUMP,FP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
    for ( i = 0; i < num_layers; ++i ) {
      printf("%i,", C[i] );
    }
    printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
    /* Print some norms on last act for fwd and weights of first layer after all iterations.
       A local buffer is used here: the original wrote to last_act_fwd_f32, which is
       declared only under #ifdef CHECK_L1 (compile error without it) and was leaked. */
    {
      float* l_last_act_f32 = (float*) malloc(MB*C[num_layers]*sizeof(float));
      libxsmm_convert_bf16_f32( act_libxsmm[num_layers], l_last_act_f32, MB*C[num_layers]);
      libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, l_last_act_f32, l_last_act_f32, 0, 0);
      printf("L1 of act[num_layers]  : %.25g\n", norms_fwd.l1_ref);
      free(l_last_act_f32);
    }
  }
  if (type == 'B') {
    printf("##########################################\n");
    printf("#   Performance - BWD (custom-Storage)   #\n");
    printf("##########################################\n");
    l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
#   pragma omp parallel private(i,j)
#endif
    {
#if defined(_OPENMP)
      const int tid = omp_get_thread_num();
#else
      const int tid = 0;
#endif
      for (j = 0; j < iters; ++j) {
#ifdef USE_SOFTMAX
        my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
                          0, tid, scratch );
#endif
        /* layers N-1 .. 1: full BWD (data + weight grads) followed by the optimizer */
        for ( i = num_layers-1; i > 0; --i) {
          my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
                          act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch );
          my_opt_exec( my_opt[i], fil_libxsmm[i], fil_master[i], delfil_libxsmm[i], 0, tid, scratch );
        }
        /* layer 0 needs no input gradient: weight-update pass only */
        my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
                        act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch );
        my_opt_exec( my_opt[0], fil_libxsmm[0], fil_master[0], delfil_libxsmm[0], 0, tid, scratch );
      }
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
    gflop = 0.0;
    for ( i = num_layers-1; i > 0; --i) {
      gflop += (4.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
    }
    gflop += (2.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
    printf("GFLOP  = %.5g\n", gflop/(double)iters);
    printf("fp time = %.5g\n", ((double)(l_total/iters)));
    printf("GFLOPS  = %.5g\n", gflop/l_total);
    printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
    for ( i = 0; i < num_layers; ++i ) {
      printf("%i,", C[i] );
    }
    printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
  }
  if (type == 'A') {
    printf("#########################################################\n");
    printf("# Unimplemented: Performance - FWD-BWD (custom-Storage) #\n");
    printf("#########################################################\n");
    exit(-1); /* combined path below is not wired up for the NUMA weight copies */
    l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
#   pragma omp parallel private(i,j)
#endif
    {
#if defined(_OPENMP)
      const int tid = omp_get_thread_num();
#else
      const int tid = 0;
#endif
      for (j = 0; j < iters; ++j) {
        for ( i = 0; i < num_layers; ++i) {
          my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], act_libxsmm[i], act_libxsmm[i+1],
                          bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, NULL, 0);
        }
#ifdef USE_SOFTMAX
        my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss,
                          0, tid, scratch );
        my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
                          0, tid, scratch );
#endif
        for ( i = num_layers-1; i > 0; --i) {
          my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
                          act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch );
          my_opt_exec( my_opt[i], fil_libxsmm[i], fil_master[i], delfil_libxsmm[i], 0, tid, scratch );
        }
        my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
                        act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch );
        my_opt_exec( my_opt[0], fil_libxsmm[0], fil_master[0], delfil_libxsmm[0], 0, tid, scratch );
      }
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
#ifdef CHECK_L1
    /* Print some norms on last act for fwd and weights of first layer after all iterations */
    last_act_fwd_f32    = (float*) malloc(MB*C[num_layers]*sizeof(float));
    first_wt_bwdupd_f32 = (float*) malloc(C[0]*C[1]*sizeof(float));
    libxsmm_convert_bf16_f32( act_libxsmm[num_layers], last_act_fwd_f32, MB*C[num_layers]);
#if 1
    libxsmm_convert_bf16_f32( fil_libxsmm[0], first_wt_bwdupd_f32, C[0]*C[1]);
    libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, last_act_fwd_f32, last_act_fwd_f32, 0, 0);
    printf("L1 of act[num_layers]  : %.25g\n", norms_fwd.l1_ref);
    libxsmm_matdiff_reduce(&diff, &norms_fwd);
    libxsmm_matdiff(&norms_bwd, LIBXSMM_DATATYPE_F32, C[0]*C[1], 1, first_wt_bwdupd_f32, first_wt_bwdupd_f32, 0, 0);
    printf("L1 of wt[0]  : %.25g\n", norms_bwd.l1_ref);
    libxsmm_matdiff_reduce(&diff, &norms_bwd);
#else
    {
      int e = 0;
      FILE *fileAct, *fileWt;
      float *ref_last_act_fwd_f32 = (float*) malloc(MB*C[num_layers]*sizeof(float));
      float *ref_first_wt_bwdupd_f32 = (float*) malloc(C[0]*C[1]*sizeof(float));
      float *ref_first_wt_bwdupd_f32_kc = (float*) malloc(C[0]*C[1]*sizeof(float));
      libxsmm_bfloat16 *first_wt_bwdupd_bf16 = (libxsmm_bfloat16*) malloc(C[0]*C[1]*sizeof(libxsmm_bfloat16));
      fileAct = fopen("acts.txt","r");
      if (fileAct != NULL) {
        int bufferLength = 255;
        char buffer[bufferLength];
        e = 0;
        while(fgets(buffer, bufferLength, fileAct)) {
          ref_last_act_fwd_f32[e] = atof(buffer);
          e++;
        }
        fclose(fileAct);
      }
      /* compare */
      libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, ref_last_act_fwd_f32, last_act_fwd_f32, 0, 0);
      printf("##########################################\n");
      printf("#   Correctness - Last fwd act           #\n");
      printf("##########################################\n");
      printf("L1 reference  : %.25g\n", norms_fwd.l1_ref);
      printf("L1 test       : %.25g\n", norms_fwd.l1_tst);
      printf("L2 abs.error  : %.24f\n", norms_fwd.l2_abs);
      printf("L2 rel.error  : %.24f\n", norms_fwd.l2_rel);
      printf("Linf abs.error: %.24f\n", norms_fwd.linf_abs);
      printf("Linf rel.error: %.24f\n", norms_fwd.linf_rel);
      printf("Check-norm    : %.24f\n", norms_fwd.normf_rel);
      libxsmm_matdiff_reduce(&diff, &norms_fwd);
      fileWt = fopen("weights.txt","r");
      if (fileWt != NULL) {
        int bufferLength = 255;
        char buffer[bufferLength];
        e = 0;
        while(fgets(buffer, bufferLength, fileWt)) {
          ref_first_wt_bwdupd_f32[e] = atof(buffer);
          e++;
        }
        fclose(fileWt);
      }
      matrix_copy_KCCK_to_KC( ref_first_wt_bwdupd_f32, ref_first_wt_bwdupd_f32_kc, C[0], C[1], bc, bk );
      matrix_copy_KCCK_to_KC_bf16( fil_libxsmm[0], first_wt_bwdupd_bf16, C[0], C[1], bc, bk );
      libxsmm_convert_bf16_f32( first_wt_bwdupd_bf16, first_wt_bwdupd_f32, C[0]*C[1] );
      /* compare */
      libxsmm_matdiff(&norms_bwd, LIBXSMM_DATATYPE_F32, C[0]*C[1], 1, ref_first_wt_bwdupd_f32_kc, first_wt_bwdupd_f32, 0, 0);
      printf("##########################################\n");
      printf("#   Correctness - First bwdupd wt        #\n");
      printf("##########################################\n");
      printf("L1 reference  : %.25g\n", norms_bwd.l1_ref);
      printf("L1 test       : %.25g\n", norms_bwd.l1_tst);
      printf("L2 abs.error  : %.24f\n", norms_bwd.l2_abs);
      printf("L2 rel.error  : %.24f\n", norms_bwd.l2_rel);
      printf("Linf abs.error: %.24f\n", norms_bwd.linf_abs);
      printf("Linf rel.error: %.24f\n", norms_bwd.linf_rel);
      printf("Check-norm    : %.24f\n", norms_bwd.normf_rel);
      libxsmm_matdiff_reduce(&diff, &norms_bwd);
      free(ref_last_act_fwd_f32);
      free(ref_first_wt_bwdupd_f32);
      free(ref_first_wt_bwdupd_f32_kc);
      free(first_wt_bwdupd_bf16);
    }
#endif
    free(first_wt_bwdupd_f32);
    free(last_act_fwd_f32);
#endif
    gflop = 0.0;
    for ( i = num_layers-1; i > 0; --i) {
      gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
    }
    gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
    printf("GFLOP  = %.5g\n", gflop/(double)iters);
    printf("fp time = %.5g\n", ((double)(l_total/iters)));
    printf("GFLOPS  = %.5g\n", gflop/l_total);
    printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
    for ( i = 0; i < num_layers; ++i ) {
      printf("%i,", C[i] );
    }
    printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
  }
  /* deallocate data */
  if ( scratch != NULL ) {
    libxsmm_free(scratch);
  }
  for ( i = 0; i < num_layers; ++i ) {
    if ( i == 0 ) {
#ifdef ACT_NUMA_INTERLEAVED
      numa_free(act_libxsmm[i], MB*C[i]*sizeof(libxsmm_bfloat16));
#else
      libxsmm_free(act_libxsmm[i]);
#endif
      libxsmm_free(delact_libxsmm[i]);
    }
#ifdef ACT_NUMA_INTERLEAVED
    numa_free(act_libxsmm[i+1], MB*C[i+1]*sizeof(libxsmm_bfloat16));
#else
    libxsmm_free(act_libxsmm[i+1]);
#endif
    libxsmm_free(delact_libxsmm[i+1]);
    libxsmm_free(fil_libxsmm[i]);
    libxsmm_free(delfil_libxsmm[i]);
    libxsmm_free(bias_libxsmm[i]);
    libxsmm_free(delbias_libxsmm[i]);
    libxsmm_free(relumask_libxsmm[i]);
    libxsmm_free(fil_master[i]);
  }
#ifdef ACT_NUMA_INTERLEAVED
  numa_free(act_libxsmm[num_layers+1], MB*C[num_layers+1]*sizeof(libxsmm_bfloat16));
#else
  libxsmm_free(act_libxsmm[num_layers+1]);
#endif
  libxsmm_free(label_libxsmm);
  for (i = 0; i < numa_num_configured_nodes(); i++) {
    free(numa_thr_cfg[i].blocksOFm_s);
    free(numa_thr_cfg[i].blocksOFm_e);
    for (j = 0; j < num_layers; j++) {
      /* per-layer scratch is only allocated on the type=='F' path (and can be
         skipped per layer/node); guard against freeing a never-set pointer */
      if (numa_thr_cfg[i].scratch[j] != NULL) {
        numa_free_aligned(numa_thr_cfg[i].scratch[j], numa_thr_cfg[i].layer_size[j]);
      }
    }
    free(numa_thr_cfg[i].scratch);
    free(numa_thr_cfg[i].layer_size);
  }
  free(numa_thr_cfg);
  free( my_opt );
  free( my_fc_fwd );
  free( my_fc_bwd );
  free( act_libxsmm );
  free( delact_libxsmm );
  free( fil_master );
  free( fil_libxsmm );
  free( delfil_libxsmm );
  free( bias_libxsmm );
  free( delbias_libxsmm );
  free( relumask_libxsmm );
  free( C );
  /* some empty lines at the end */
  printf("\n\n\n");
  return 0;
}
|
main.c | #include <stdio.h>
#include <time.h>
/* Timing summary produced by runComputation() for one benchmark run. */
struct Result
{
/* number of operations executed; NOTE(review): runComputation below never
   assigns this field, so main prints an indeterminate value -- fix at source */
long operationsRun;
/* elapsed CPU time of executeSummation(), in milliseconds (via clock()) */
double millisecondsSpent;
/* millisecondsSpent scaled to microseconds per output cell (range*range cells) */
double microsecondsPerOperation;
};
/* Here lies the actual code */
/* Benchmark kernel: for every cell (a, b) of a range x range grid, compute
 * the sum 0 + 1 + ... + (a + b - 1) and store it into a scratch matrix.
 * The matrix is pure scratch — the function returns nothing and the result
 * is intentionally discarded (this exists only to burn measurable CPU time).
 *
 * NOTE(review): the VLA needs range*range*sizeof(int) bytes of stack
 * (~400 MB for range = 10000, the value used by main) — that almost
 * certainly overflows the stack; consider heap allocation. TODO confirm
 * the intended problem size.
 */
void executeSummation(int range)
{
    int output[range][range];
#pragma acc kernels
    {
#pragma omp parallel for
        for (int a = 0; a < range; a++)
        {
            for (int b = 0; b < range; b++)
            {
                int result = 0;
                for (int n = 0; n < (a + b); n++)
                {
                    result += n;
                }
                /* Was `+=`: that read an uninitialized VLA element, which is
                 * undefined behavior. A plain store is what was intended. */
                output[a][b] = result;
            }
        }
    }
}
/* Runs executeSummation(range) once and returns timing statistics.
 * Times come from clock(), i.e. CPU time accumulated across all threads,
 * not wall-clock time. */
struct Result runComputation(int range)
{
    struct Result result;
    clock_t begin = clock();
    executeSummation(range);
    clock_t end = clock();
    /* Bug fix: operationsRun was never initialized, so main printed an
     * indeterminate value. Widen before multiplying to avoid int overflow
     * for range > 46340. */
    result.operationsRun = (long) range * (long) range;
    result.millisecondsSpent = 1000.0 * (double) (end - begin) / CLOCKS_PER_SEC;
    /* Compute the op count in double as well (range*range as int overflows
     * for large ranges). */
    result.microsecondsPerOperation =
        1000.0 * result.millisecondsSpent / ((double) range * (double) range);
    return result;
}
/* Entry point: times one 10000 x 10000 summation run and prints the stats. */
int main()
{
    struct Result result = runComputation(10000);
    /* Bug fix: operationsRun is a long — printing it with %d was a
     * mismatched format specifier (undefined behavior). */
    printf("\nRan %ld ops\n", result.operationsRun);
    printf("Total execution time: %f ms\n", result.millisecondsSpent);
    printf("Total time per op: %f microsecs\n", result.microsecondsPerOperation);
    return 0;
} |
axpy.c | /*
* AXPY Y[N] = Y[N] + a*X[N]
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <omp.h>        /* omp_get_thread_num / omp_get_num_threads */
#include <pthread.h>
#include <sys/timeb.h>
/* read timer in second */
/* Current time of day in seconds, with millisecond resolution
 * (via the legacy ftime() call). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    double seconds = (double) now.time;
    double fraction = (double) now.millitm / 1000.0;
    return seconds + fraction;
}
/* read timer in ms */
/* Current time of day in milliseconds (via the legacy ftime() call). */
double read_timer_ms() {
    struct timeb now;
    ftime(&now);
    double whole_ms = (double) now.time * 1000.0;
    return whole_ms + (double) now.millitm;
}
#define REAL float
#define VECTOR_LENGTH 102400
/* initialize a vector with random floating point numbers */
/* Fill A[0..N-1] with pseudo-random values from drand48() (range [0, 1)). */
void init(REAL A[], int N) {
    int k = 0;
    while (k < N) {
        A[k] = (double) drand48();
        k++;
    }
}
/* Sum of element-wise differences A[i] - B[i]; 0.0 when the arrays match.
 * Note: positive and negative differences can cancel each other out. */
double check(REAL A[], REAL B[], int N) {
    double total = 0.0;
    for (int k = 0; k < N; k++) {
        total += A[k] - B[k];
    }
    return total;
}
void axpy_base(int N, REAL Y[], REAL X[], REAL a);
void axpy_base_sub(int i_start, int Nt, int N, REAL Y[], REAL X[], REAL a);
void axpy_dist(int N, REAL Y[], REAL X[], REAL a, int num_tasks);
void axpy_omp_parallel(int N, REAL Y[], REAL X[], REAL a, int num_tasks);
void axpy_omp_parallel_for(int N, REAL Y[], REAL X[], REAL a);
int main(int argc, char *argv[]) {
int N = VECTOR_LENGTH;
int num_tasks = 4; /* 4 is default number of tasks */
double elapsed; /* for timing */
double elapsed_dist; /* for timing */
if (argc < 2) {
fprintf(stderr, "Usage: axpy <n> [<#tasks(%d)>] (n should be dividable by #tasks)\n", num_tasks);
exit(1);
}
N = atoi(argv[1]);
if (argc > 2) num_tasks = atoi(argv[2]);
REAL a = 123.456;
REAL Y_base[N];
REAL Y_dist[N];
REAL X[N];
srand48((1 << 12));
init(X, N);
init(Y_base, N);
memcpy(Y_dist, Y_base, N * sizeof(REAL));
/* example run */
elapsed = read_timer();
axpy_omp_parallel_for(N, Y_base, X, a);
elapsed = (read_timer() - elapsed);
elapsed_dist = read_timer();
axpy_omp_parallel(N, Y_dist, X, a, num_tasks);
elapsed_dist = (read_timer() - elapsed_dist);
/* you should add the call to each function and time the execution */
printf("======================================================================================================\n");
printf("\tAXPY: Y[N] = Y[N] + a*X[N], N=%d, %d tasks for dist\n", N, num_tasks);
printf("------------------------------------------------------------------------------------------------------\n");
printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
printf("------------------------------------------------------------------------------------------------------\n");
printf("axpy_base:\t\t%4f\t%4f \t\t%g\n", elapsed * 1.0e3, (2.0 * N) / (1.0e6 * elapsed), check(Y_base, Y_base, N));
printf("axpy_dist:\t\t%4f\t%4f \t\t%g\n", elapsed_dist * 1.0e3, (2.0 * N) / (1.0e6 * elapsed_dist), check(Y_base, Y_dist, N));
return 0;
}
/* Baseline serial AXPY: Y[k] += a * X[k] for all N elements. */
void axpy_base(int N, REAL Y[], REAL X[], REAL a) {
    for (int k = 0; k < N; ++k) {
        Y[k] = Y[k] + a * X[k];
    }
}
/* AXPY over the sub-range [i_start, i_start + Nt): Y[idx] += a * X[idx].
 * N (the full vector length) is accepted for interface symmetry with the
 * other variants but is not used here. */
void axpy_base_sub(int i_start, int Nt, int N, REAL Y[], REAL X[], REAL a) {
    int end = i_start + Nt;
    for (int idx = i_start; idx < end; ++idx) {
        Y[idx] = Y[idx] + a * X[idx];
    }
}
/* Serial simulation of a block-distributed AXPY: the index range is split
 * into num_tasks contiguous chunks of N/num_tasks elements each.
 * Fix: when num_tasks does not divide N, the original silently skipped the
 * trailing N % num_tasks elements; the last chunk now absorbs the remainder
 * (behavior is unchanged when N is divisible). */
void axpy_dist(int N, REAL Y[], REAL X[], REAL a, int num_tasks) {
    int tid;
    for (tid = 0; tid < num_tasks; tid++) {
        int Nt = N / num_tasks;
        int start = tid * Nt;
        if (tid == num_tasks - 1) {
            Nt = N - start; /* last chunk takes the remainder */
        }
        axpy_base_sub(start, Nt, N, Y, X, a);
    }
}
/* Parallel AXPY using an explicit OpenMP parallel region: each of the
 * num_tasks threads computes its own contiguous block of the vectors.
 * Fix: when num_tasks does not divide N, the original left the trailing
 * N % num_tasks elements unprocessed; the last thread now absorbs the
 * remainder (behavior is unchanged when N is divisible). */
void axpy_omp_parallel(int N, REAL Y[], REAL X[], REAL a, int num_tasks) {
    int tid;
#pragma omp parallel shared (Y, X, a, N, num_tasks) private (tid) num_threads(num_tasks)
    {
        int Nt, start;
        tid = omp_get_thread_num();
        Nt = N / num_tasks;
        start = tid * Nt;
        if (tid == num_tasks - 1) {
            Nt = N - start; /* last thread takes the remainder */
        }
        axpy_base_sub(start, Nt, N, Y, X, a);
    }
}
/* AXPY parallelized with an OpenMP worksharing loop (static schedule).
 * The master thread reports the team size once before the loop. */
void axpy_omp_parallel_for(int N, REAL Y[], REAL X[], REAL a) {
    int i;
    #pragma omp parallel shared (N, Y, X, a) private (i)
    {
        #pragma omp master
        {
            printf("nthreads: %d\n", omp_get_num_threads());
        }
        #pragma omp for schedule(static)
        for (i = 0; i < N; ++i) {
            Y[i] = Y[i] + a * X[i];
        }
    }
}
|
GB_unaryop__minv_fp64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_int16
// op(A') function: GB_tran__minv_fp64_int16
// C type: double
// A type: int16_t
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the MINV unary operator elementwise: Cx [p] = 1./((double) Ax [p])
// for all anz entries (see the GB_OP / GB_CASTING macros above).  Note that
// an input of 0 yields +Inf under IEEE double arithmetic.  Auto-generated —
// do not edit; the pattern of A is handled by the caller, only the value
// arrays are touched here.
GrB_Info GB_unop__minv_fp64_int16
(
double *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this specialization was compiled out (GB_DISABLE above); the caller
// falls back to the generic worker
return (GrB_NO_VALUE) ;
#else
// each entry is independent, so a simple static-schedule parallel loop
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((double) A'): transpose A, typecast int16 -> double, and apply
// the MINV operator.  The loop nest itself lives in the shared template
// GB_unaryop_transpose.c, specialized via the GB_* macros defined above;
// GB_PHASE_2_OF_2 selects the second (fill) phase of the two-phase
// transpose.  Rowcounts, Iter, A_slice and naslice describe how A has been
// sliced across threads by the caller.
GrB_Info GB_tran__minv_fp64_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this specialization was compiled out (GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
inputBug342.c | /*
-rose:C_only does not work!
*/
/* Minimal reproducer kept as a compiler test input: an orphaned `omp for`
 * outside any enclosing parallel region.  Per OpenMP semantics an orphaned
 * worksharing construct encountered by a single thread simply runs the
 * loop sequentially. */
int main(void)
{
int i,j;
#pragma omp for
for (i=0;i<10;i++) j=0; /* body is a dead store; only the construct matters */
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.