source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
pr39154.c | /* PR middle-end/39154 */
/* { dg-do compile } */
/* { dg-additional-options "-std=gnu99" } */
extern void abort (void);
int n = 20;
/* Entry point for the OpenMP testcase: exercises nested '#pragma omp
   parallel for' loops over variable-length arrays (a[n], b[n][n]) with
   every combination of default, shared, and private data-sharing clauses
   (PR middle-end/39154).  This is a compile-only test ({ dg-do compile }). */
int
main (void)
{
/* VLAs sized by the global 'n'. */
int a[n], b[n][n];
/* Case 1: implicit data-sharing on both nesting levels. */
#pragma omp parallel for
for (int i = 0; i < n; i++)
{
a[i] = i + 1;
#pragma omp parallel for
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
/* Serial verification of case 1. */
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
if (b[i][j] != i + 1)
abort ();
if (a[i] != i + 1)
abort ();
}
/* Case 2: explicit 'shared' clause on the outer loop only. */
#pragma omp parallel for shared (n, a, b)
for (int i = 0; i < n; i++)
{
a[i] = i + 3;
#pragma omp parallel for
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
if (b[i][j] != i + 3)
abort ();
if (a[i] != i + 3)
abort ();
}
/* Case 3: explicit 'shared' clause on the inner loop only. */
#pragma omp parallel for
for (int i = 0; i < n; i++)
{
a[i] = i + 5;
#pragma omp parallel for shared (n, a, b)
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
if (b[i][j] != i + 5)
abort ();
if (a[i] != i + 5)
abort ();
}
/* Case 4: explicit 'shared' clause on both loops. */
#pragma omp parallel for shared (n, a, b)
for (int i = 0; i < n; i++)
{
a[i] = i + 7;
#pragma omp parallel for shared (n, a, b)
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
if (b[i][j] != i + 7)
abort ();
if (a[i] != i + 7)
abort ();
}
/* Case 5: 'private' VLAs on the outer loop; no result check — only
   that the construct is accepted by the compiler. */
#pragma omp parallel for private (a, b)
for (int i = 0; i < n; i++)
{
a[i] = i + 1;
#pragma omp parallel for
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
/* Case 6: 'private' VLAs on both nesting levels. */
#pragma omp parallel for private (a, b)
for (int i = 0; i < n; i++)
{
a[i] = i + 1;
#pragma omp parallel for private (b)
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
return 0;
}
|
GB_unaryop__minv_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_int16
// op(A') function: GB_tran__minv_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* GB_unop__minv_uint8_int16: Cx [p] = GB_IMINV_UNSIGNED ((uint8_t) Ax [p], 8)
   for p = 0..anz-1 — cast each int16_t input to uint8_t and apply the 8-bit
   unsigned modular multiplicative inverse, parallelized across 'nthreads'
   OpenMP threads.  Returns GrB_NO_VALUE when this operator/type pairing is
   compiled out (GB_DISABLE), GrB_SUCCESS otherwise. */
GrB_Info GB_unop__minv_uint8_int16
(
uint8_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
/* Cx [p] = op (cast (Ax [p])), expanded via the GB_* macros above */
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* GB_tran__minv_uint8_int16: C = op (cast (A')) — transpose A, typecast its
   int16_t entries to uint8_t, and apply the 8-bit unsigned modular inverse.
   The worker loop is generated by including GB_unaryop_transpose.c under the
   macros defined above; Rowcounts, Iter, A_slice, and naslice describe how A
   is sliced across threads (semantics defined in that template).  Returns
   GrB_NO_VALUE when compiled out, GrB_SUCCESS otherwise. */
GrB_Info GB_tran__minv_uint8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/* Read every line of 'filename' into a freshly allocated list (one string
   per line).  Aborts via file_error() when the file cannot be opened.
   Caller owns the returned list and the strings it holds. */
list *get_paths(char *filename)
{
    FILE *fp = fopen(filename, "r");
    if(!fp) file_error(filename);
    list *lines = make_list();
    char *line;
    while((line = fgetl(fp)) != 0){
        list_insert(lines, line);
    }
    fclose(fp);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/* Sample n paths uniformly at random (with replacement) from
   paths[0..m-1].  rand() is serialized behind the file-scope mutex so
   concurrent loader threads draw from a consistent stream.  Caller frees
   the returned array (but not the strings, which stay owned by 'paths'). */
char **get_random_paths(char **paths, int n, int m)
{
    char **chosen = calloc(n, sizeof(char*));
    pthread_mutex_lock(&mutex);
    for(int i = 0; i < n; ++i){
        chosen[i] = paths[rand()%m];
    }
    pthread_mutex_unlock(&mutex);
    return chosen;
}
/* Return a new array of n strings in which every occurrence of 'find' in
   paths[i] is substituted with 'replace'.  Caller owns the returned array
   and each string inside it. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **out = calloc(n, sizeof(char*));
    for(int i = 0; i < n; ++i){
        char buf[4096];
        find_replace(paths[i], find, replace, buf);
        out[i] = copy_string(buf);
    }
    return out;
}
/* Load n images at w x h, convert each to grayscale, and pack them as rows
   of a matrix (one flattened image per row).  cols is set from the last
   image loaded; each row takes ownership of the image's pixel buffer. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(int i = 0; i < n; ++i){
        image color = load_image(paths[i], w, h, 3);
        image gray = grayscale_image(color);
        free_image(color);
        X.vals[i] = gray.data;
        X.cols = gray.h*gray.w*gray.c;
    }
    return X;
}
/* Load n color images at w x h and pack them as rows of a matrix, one
   flattened image per row; cols is set from the last image loaded. */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(int i = 0; i < n; ++i){
        image loaded = load_image_color(paths[i], w, h);
        X.vals[i] = loaded.data;
        X.cols = loaded.h*loaded.w*loaded.c;
    }
    return X;
}
/* Load n images and apply training-time augmentation to each: either a
   size x size center crop (center != 0) or a random rotate/scale/crop via
   random_augment_image(), followed by a 50% horizontal flip and a random
   HSV distortion.  Each augmented image becomes one row of the returned
   matrix; cols is taken from the last image processed. */
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
/* load at native resolution (w = h = 0) */
image im = load_image_color(paths[i], 0, 0);
image crop;
if(center){
crop = center_crop_image(im, size, size);
} else {
crop = random_augment_image(im, angle, aspect, min, max, size, size);
}
/* 50% chance of a horizontal flip */
int flip = rand()%2;
if (flip) flip_image(crop);
random_distort_image(crop, hue, saturation, exposure);
/*
show_image(im, "orig");
show_image(crop, "crop");
cvWaitKey(0);
*/
free_image(im);
/* the matrix row takes ownership of crop's pixel buffer */
X.vals[i] = crop.data;
X.cols = crop.h*crop.w*crop.c;
}
return X;
}
box_label *read_boxes(char *filename, int *n)
{
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
float x, y, h, w;
int id;
int count = 0;
int size = 64;
box_label *boxes = calloc(size, sizeof(box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
boxes = realloc(boxes, size*sizeof(box_label));
}
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
/* Shuffle the n box labels in place.  Uses an unbiased Fisher-Yates pass
   (swap element i with a uniform pick from [i, n)); the previous version
   swapped every element with rand()%n, which is a known biased shuffle. */
void randomize_boxes(box_label *b, int n)
{
    int i;
    for(i = 0; i < n - 1; ++i){
        int j = i + rand()%(n - i);
        box_label swap = b[i];
        b[i] = b[j];
        b[j] = swap;
    }
}
/* Map box coordinates through the augmentation that was applied to the
   image: scale the edges by (sx, sy), translate by (-dx, -dy), optionally
   mirror horizontally, clamp everything into [0,1], and rebuild the
   center/size representation from the clamped edges.  Boxes sitting at
   exactly (0,0) are treated as empty padding slots and pushed far outside
   the image so downstream code ignores them. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
int i;
for(i = 0; i < n; ++i){
if(boxes[i].x == 0 && boxes[i].y == 0) {
/* sentinel/empty slot: move it far off-image */
boxes[i].x = 999999;
boxes[i].y = 999999;
boxes[i].w = 999999;
boxes[i].h = 999999;
continue;
}
/* apply the crop/scale transform to the box edges */
boxes[i].left = boxes[i].left * sx - dx;
boxes[i].right = boxes[i].right * sx - dx;
boxes[i].top = boxes[i].top * sy - dy;
boxes[i].bottom = boxes[i].bottom* sy - dy;
if(flip){
/* mirror horizontally inside the unit square */
float swap = boxes[i].left;
boxes[i].left = 1. - boxes[i].right;
boxes[i].right = 1. - swap;
}
/* clamp edges into the visible [0,1] region */
boxes[i].left = constrain(0, 1, boxes[i].left);
boxes[i].right = constrain(0, 1, boxes[i].right);
boxes[i].top = constrain(0, 1, boxes[i].top);
boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
/* recompute the center/size form from the clamped edges */
boxes[i].x = (boxes[i].left+boxes[i].right)/2;
boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
boxes[i].w = (boxes[i].right - boxes[i].left);
boxes[i].h = (boxes[i].bottom - boxes[i].top);
boxes[i].w = constrain(0, 1, boxes[i].w);
boxes[i].h = constrain(0, 1, boxes[i].h);
}
}
/* Fill the SWAG-format truth buffer for one image: read the label file
   derived from 'path', shuffle and geometry-correct the boxes, then write
   up to 90 records of (x, y, w, h) followed by a one-hot class vector,
   each record (4+classes) floats wide.
   NOTE(review): the 'w < .0 || h < .0' filter only rejects negative sizes,
   unlike fill_truth_region's .005 threshold — confirm this is intended. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
/* derive the label .txt path from the image path */
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
/* undo/replay the image augmentation on the box coordinates */
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count && i < 90; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .0 || h < .0) continue;
int index = (4+classes) * i;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
/* one-hot class indicator after the 4 box coordinates */
if (id < classes) truth[index+id] = 1;
}
free(boxes);
}
/* Fill the region-format truth grid for one image: the image is divided
   into num_boxes x num_boxes cells and each ground-truth box is assigned
   to the cell containing its center, storing
   [objectness, one-hot class, x-offset, y-offset, w, h] = (5+classes)
   floats per cell.  Only the first box landing in a cell is kept; boxes
   with w or h below .005 are skipped as degenerate. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
/* derive the label .txt path from the image path */
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
/* correct the boxes for the augmentation applied to the image */
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .005 || h < .005) continue;
/* grid cell containing the box center */
int col = (int)(x*num_boxes);
int row = (int)(y*num_boxes);
/* center coordinates relative to that cell */
x = x*num_boxes - col;
y = y*num_boxes - row;
int index = (col+row*num_boxes)*(5+classes);
/* keep only the first box assigned to each cell */
if (truth[index]) continue;
truth[index++] = 1;
if (id < classes) truth[index+id] = 1;
index += classes;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
}
free(boxes);
}
/* Expand a run-length encoding into im.data: rle[i] is the length of the
   i-th run, and runs alternate between 0 and 1 starting with 0.  Any pixels
   remaining after the last run are filled with the current value. */
void load_rle(image im, int *rle, int n)
{
    int filled = 0;
    int value = 0;
    for(int i = 0; i < n; ++i){
        for(int run = 0; run < rle[i]; ++run){
            im.data[filled++] = value;
        }
        value = 1 - value;
    }
    while(filled < im.h*im.w*im.c){
        im.data[filled++] = value;
    }
}
/* OR the single-channel mask 'src' into channel c of 'dest': every nonzero
   source pixel forces the corresponding destination pixel to 1. */
void or_image(image src, image dest, int c)
{
    int area = src.w*src.h;
    for(int i = 0; i < area; ++i){
        if(src.data[i] != 0){
            dest.data[dest.w*dest.h*c + i] = 1;
        }
    }
}
/* Make the channels of 'src' mutually exclusive per pixel: when a pixel is
   set in channel k, clear that pixel in every later channel, so the lowest
   set channel wins. */
void exclusive_image(image src)
{
    int area = src.w*src.h;
    for(int k = 0; k < src.c - 1; ++k){
        for(int i = 0; i < area; ++i){
            if(!src.data[k*area + i]) continue;
            for(int j = k + 1; j < src.c; ++j){
                src.data[j*area + i] = 0;
            }
        }
    }
}
/* Compute the tight bounding box of the nonzero pixels in the first channel
   of 'im'.  When the image is entirely zero the returned box has
   non-positive width/height (callers test b.w > 0). */
box bound_image(image im)
{
    int minx = im.w, miny = im.h;
    int maxx = 0, maxy = 0;
    for(int y = 0; y < im.h; ++y){
        for(int x = 0; x < im.w; ++x){
            if(!im.data[y*im.w + x]) continue;
            if(x < minx) minx = x;
            if(y < miny) miny = y;
            if(x > maxx) maxx = x;
            if(y > maxy) maxy = y;
        }
    }
    box b = {minx, miny, maxx - minx + 1, maxy - miny + 1};
    return b;
}
/* Fill instance-segmentation truth for one image: read up to num_boxes
   "<class id> <RLE>" lines from the mask file derived from 'path', replay
   the same rotate/crop augmentation and flip that was applied to the image,
   and store per instance: normalized bounding box (4 floats), a mw x mh
   resized binary mask (mw*mh floats), and the class id (1 float) —
   (4 + mw*mh + 1) floats per slot.  Instances whose augmented mask is empty
   are skipped.  'classes' is accepted for signature symmetry but unused. */
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
int i = 0;
/* scratch single-channel canvas each instance mask is decoded into */
image part = make_image(w, h, 1);
while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
/* replay the image's geometric augmentation on the mask */
image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
if(flip) flip_image(sized);
box b = bound_image(sized);
if(b.w > 0){
image crop = crop_image(sized, b.x, b.y, b.w, b.h);
image mask = resize_image(crop, mw, mh);
/* normalized center/size of the instance's bounding box */
truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
int j;
/* flattened mw x mh mask follows the box coordinates */
for(j = 0; j < mw*mh; ++j){
truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
}
truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
free_image(crop);
free_image(mask);
++i;
}
free_image(sized);
free(rle);
}
fclose(file);
free_image(part);
}
/**
 * \brief Load the ground-truth labels for one image: class id plus box
 *        center (x, y) and size (w, h) for each object.
 *
 * \param path      image file path; the label file path is derived from it
 *                  by string substitution ("images" -> "labels", image
 *                  extension -> ".txt")
 * \param num_boxes maximum number of boxes kept per image; when the label
 *                  file holds more, only the first num_boxes after
 *                  shuffling are used
 * \param truth     output buffer, 5 floats per box: x, y, w, h, id
 * \param classes   unused here (kept for signature compatibility)
 * \param flip      whether the image was mirrored horizontally
 * \param dx, dy    normalized offset of the intermediate (augmented) image
 *                  inside the final image, negated
 * \param sx, sy    width/height ratio of the intermediate image to the
 *                  final image
 *
 * The last five parameters describe the scale/translate/flip augmentation
 * already applied to the image, so the boxes can be corrected to match
 * (see correct_boxes()).  Near-degenerate boxes (w or h < .001) are
 * dropped and the surviving records are packed densely via 'sub'.
 */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes,
int flip, float dx, float dy, float sx, float sy)
{
/* derive the label file location from the image path */
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, "raw", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
/* read the annotations from the derived label file */
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count); // shuffle the box order
correct_boxes(boxes, count, dx, dy, sx, sy, flip); // adjust boxes for the augmentation applied to the image
if(count > num_boxes) count = num_boxes;
float x,y,w,h;
int id;
int i;
int sub = 0;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
/* skip boxes collapsed to (near) zero size by cropping; 'sub' keeps
   the surviving records densely packed */
if ((w < .001 || h < .001)) {
++sub;
continue;
}
truth[(i-sub)*5+0] = x;
truth[(i-sub)*5+1] = y;
truth[(i-sub)*5+2] = w;
truth[(i-sub)*5+3] = h;
truth[(i-sub)*5+4] = id;
}
free(boxes);
}
#define NUMCHARS 37
/* Print the most likely character for each of the n positions in 'pred'
   (each position holds NUMCHARS class scores), followed by a newline. */
void print_letters(float *pred, int n)
{
    for(int i = 0; i < n; ++i){
        int best = max_index(pred + i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(best));
    }
    printf("\n");
}
/* Build a one-hot captcha label from the image filename: each character of
   the basename (up to n characters, stopping at '.') selects one of
   NUMCHARS classes via alphanum_to_int(); remaining positions are marked
   with the final "blank" class (NUMCHARS-1).
   Assumes 'truth' holds n*NUMCHARS floats and 'path' contains a '/'.
   Fixes: strlen() hoisted out of the loop condition, and indices outside
   [0, NUMCHARS) are skipped instead of written (previously an index > 36
   would have written out of bounds). */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
    size_t len = strlen(begin);  /* hoisted: was re-evaluated every iteration */
    for(i = 0; (size_t)i < len && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        /* guard against an out-of-range class index from a bad character */
        if(index < 0 || index >= NUMCHARS) continue;
        truth[i*NUMCHARS+index] = 1;
    }
    /* pad the remaining positions with the blank class */
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}
/* Load a captcha classification batch: n images resized to w x h, each with
   a k-character one-hot label (NUMCHARS classes per character) derived from
   its filename.  When m > 0 the n paths are sampled randomly from the pool
   of m and the temporary path array is freed before returning. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    char **selected = paths;
    if(m) selected = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(selected, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    for(int i = 0; i < n; ++i){
        fill_truth_captcha(selected[i], k, d.y.vals[i]);
    }
    if(m) free(selected);
    return d;
}
/* Autoencoder variant of the captcha loader: the target aliases the input
   (d.y = d.X), with cols forced to the fixed 17100 expected by the encoder
   network.  When m > 0 the paths are sampled randomly from the pool. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    char **selected = paths;
    if(m) selected = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(selected, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(selected);
    return d;
}
/* Set truth[i] = 1 for every label labels[i] that occurs as a substring of
   'path' (all k entries are zeroed first).  Prints a warning when the match
   count is not exactly one — except in the k == 1, zero-match case, which
   is treated as a valid negative example. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    memset(truth, 0, k*sizeof(float));
    int matches = 0;
    for(int i = 0; i < k; ++i){
        if(strstr(path, labels[i]) != NULL){
            truth[i] = 1;
            ++matches;
        }
    }
    if(matches != 1 && (k != 1 || matches != 0)){
        printf("Too many or too few labels: %d, %s\n", matches, path);
    }
}
/* Propagate one-hot leaf labels up a label hierarchy: every ancestor of a
   set label is also set to 1.  Then, for each sibling group in which no
   label is set, every member is marked with SECRET_NUM — a project-wide
   sentinel (defined elsewhere) that presumably means "ignore this group in
   the loss"; confirm against the loss code. */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
int j;
for(j = 0; j < k; ++j){
if(truth[j]){
/* walk the parent links to the root, setting each ancestor */
int parent = hierarchy->parent[j];
while(parent >= 0){
truth[parent] = 1;
parent = hierarchy->parent[parent];
}
}
}
int i;
int count = 0;
/* 'count' is the running offset of the current group within truth */
for(j = 0; j < hierarchy->groups; ++j){
//printf("%d\n", count);
int mask = 1;
for(i = 0; i < hierarchy->group_size[j]; ++i){
if(truth[count + i]){
mask = 0;
break;
}
}
if (mask) {
/* no member of this group is active: mark the whole group */
for(i = 0; i < hierarchy->group_size[j]; ++i){
truth[count + i] = SECRET_NUM;
}
}
count += hierarchy->group_size[j];
}
}
/* Load k regression targets per image from the label file derived from
   each image path ("images" -> "labels", image extension -> ".txt").
   Returns an n x k matrix of floats.
   Fix: the fopen() result was previously passed to fscanf() unchecked,
   crashing on a missing label file — now aborts via file_error() like the
   other loaders; short files leave the remaining entries at the
   make_matrix() defaults. */
matrix load_regression_labels_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i,j;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        find_replace(paths[i], "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".BMP", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        find_replace(labelpath, ".JPG", ".txt", labelpath);
        find_replace(labelpath, ".JPeG", ".txt", labelpath);
        find_replace(labelpath, ".Jpeg", ".txt", labelpath);
        find_replace(labelpath, ".PNG", ".txt", labelpath);
        find_replace(labelpath, ".TIF", ".txt", labelpath);
        find_replace(labelpath, ".bmp", ".txt", labelpath);
        find_replace(labelpath, ".jpeg", ".txt", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".png", ".txt", labelpath);
        find_replace(labelpath, ".tif", ".txt", labelpath);
        FILE *file = fopen(labelpath, "r");
        if(!file) file_error(labelpath);
        for(j = 0; j < k; ++j){
            /* stop on a short or malformed file instead of re-reading EOF */
            if(fscanf(file, "%f", &(y.vals[i][j])) != 1) break;
        }
        fclose(file);
    }
    return y;
}
/* Build an n x k label matrix: one row per path, filled by substring
   matching against 'labels' and optionally propagated up a label
   hierarchy.  A NULL 'labels' yields an all-zero matrix. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    if(!labels) return y;
    for(int i = 0; i < n; ++i){
        fill_truth(paths[i], labels, k, y.vals[i]);
        if(hierarchy) fill_hierarchy(y.vals[i], k, hierarchy);
    }
    return y;
}
/* Build an n x k multi-hot tag matrix: for each image, read integer tag ids
   from its derived "<labels>/<name>.txt" file and set the matching columns.
   Images without a tag file simply keep an all-zero row. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    for(int i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "images", "labels", label);
        find_replace(label, ".jpg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file) continue;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    return y;
}
/**
 * \brief Read the class names of a dataset into an array of strings
 *        (one name per line of the file).
 *
 * \param filename path of the names file
 *
 * \return char** array of the names read from the file; caller owns the
 *         array and its strings.  The temporary list is freed here.
 */
char **get_labels(char *filename)
{
list *plist = get_paths(filename);
char **labels = (char **)list_to_array(plist);
free_list(plist);
return labels;
}
/* Release a data bundle.  Deep data owns its row buffers, so both full
   matrices are freed; shallow data only owns the row-pointer arrays. */
void free_data(data d)
{
    if(d.shallow){
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/* Build a w x h x classes segmentation mask for the image at 'path' by
   reading its RLE mask file (one "<id> <rle>" record per line) and OR-ing
   each decoded run into channel id of the result.  Aborts when the mask
   file is missing. */
image get_segmentation_image(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    /* scratch canvas each instance is decoded into before OR-ing */
    image scratch = make_image(w, h, 1);
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int count = 0;
        int *rle = read_intlist(buff, &count, 0);
        load_rle(scratch, rle, count);
        or_image(scratch, mask, id);
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(scratch);
    return mask;
}
/* Like get_segmentation_image() but with an extra trailing "background"
   channel (classes+1 total): the background channel starts all 1 and is
   cleared wherever any decoded instance covers a pixel. */
image get_segmentation_image2(char *path, int w, int h, int classes)
{
char labelpath[4096];
/* derive the RLE mask file path from the image path */
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
image mask = make_image(w, h, classes+1);
int i;
/* background channel (index 'classes') starts fully on */
for(i = 0; i < w*h; ++i){
mask.data[w*h*classes + i] = 1;
}
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
or_image(part, mask, id);
/* clear background wherever this instance covers a pixel */
for(i = 0; i < w*h; ++i){
if(part.data[i]) mask.data[w*h*classes + i] = 0;
}
free(rle);
}
//exclusive_image(mask);
fclose(file);
free_image(part);
return mask;
}
/* Load a segmentation batch: n randomly sampled images with rotate/crop/
   flip/color augmentation; the target for each image is its class mask run
   through the SAME geometric augmentation, spatially downsampled by 'div'. */
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y.rows = n;
d.y.cols = h*w*classes/div/div;
d.y.vals = calloc(d.X.rows, sizeof(float*));
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
/* draw one set of augmentation parameters, reused for the mask below */
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
/* color jitter is applied to the image only, never the mask */
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
//image mask = make_image(orig.w, orig.h, classes+1);
/* same geometric transform as the image, scaled down by div */
image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
if(flip) flip_image(sized_m);
d.y.vals[i] = sized_m.data;
free_image(orig);
free_image(mask);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
/* Load an instance-segmentation batch: n randomly sampled images with
   rotate/crop/flip/color augmentation; truth rows hold up to 'boxes'
   instance records of (coords+1) floats each, filled by fill_truth_iseg()
   with the same augmentation parameters and a fixed 14 x 14 mask size. */
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, (coords+1)*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
/* draw augmentation parameters once; replayed on the masks */
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
//show_image(sized, "image");
/* replay the same augmentation on the instance masks (14 x 14 output) */
fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);
free_image(orig);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
/* Load a region-format (grid) detection batch: n randomly sampled images
   with crop/flip/color jitter; labels are written as a size x size grid of
   (5+classes) floats per cell by fill_truth_region(), corrected for the
   crop parameters below. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = size*size*(5+classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
int oh = orig.h;
int ow = orig.w;
/* random crop offsets on each side, up to +/- jitter of the image size */
int dw = (ow*jitter);
int dh = (oh*jitter);
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = ow - pleft - pright;
int sheight = oh - ptop - pbot;
/* crop-to-original size ratios, used to correct the boxes */
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
/* normalized crop offset, used to correct the boxes */
float dx = ((float)pleft/ow)/sx;
float dy = ((float)ptop /oh)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
/* write the grid truth, corrected for the crop/flip above */
fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
}
free(random_paths);
return d;
}
/* Build a comparison batch: n image pairs concatenated channel-wise
   (6 channels per row) with a 2-entries-per-class target built from
   per-class IoU label files.  For each class the pair is labeled (1,0) or
   (0,1) when exactly one side clearly wins (>.5 vs <.5); otherwise both
   entries get the SECRET_NUM sentinel.
   Fix: the label-file handles are now checked before use — previously a
   missing label file handed a NULL FILE* straight to fscanf(). */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2], w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);
        /* pack the pair into one 6-channel row */
        d.X.vals[i] = calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
        int id;
        float iou;
        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if(!fp1) file_error(imlabel1);
        /* keep the best IoU seen per class for side 1 */
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if(!fp2) file_error(imlabel2);
        /* keep the best IoU seen per class for side 2 */
        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                /* ambiguous pair: mark both entries with the sentinel */
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/* Load a single-image SWAG training sample: pick one random path from the
   n available, apply a random crop (jitter) and horizontal flip, and fill
   a (4+classes)*90 truth vector via fill_truth_swag().  Note the image is
   resized back to its own original w x h rather than a fixed network size. */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
int index = rand()%n;
char *random_path = paths[index];
image orig = load_image_color(random_path, 0, 0);
int h = orig.h;
int w = orig.w;
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
d.X.rows = 1;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = (4+classes)*90;
d.y = make_matrix(1, k);
/* random crop offsets on each side, up to +/- jitter of the image size */
int dw = w*jitter;
int dh = h*jitter;
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = w - pleft - pright;
int sheight = h - ptop - pbot;
/* crop-to-original ratios and offsets, used to correct the boxes */
float sx = (float)swidth / w;
float sy = (float)sheight / h;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/w)/sx;
float dy = ((float)ptop /h)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
d.X.vals[0] = sized.data;
fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
return d;
}
/**
 * \brief Load and augment a batch of detection training data.
 *
 * Randomly samples n images from the training set, augments each one, and
 * corrects the box labels to match the augmentation.  All outputs are the
 * network input size w x h, so source images may have any dimensions.
 *
 * Augmentation: aspect-ratio and scale jitter, random placement on a
 * gray-filled canvas (translation), HSV color distortion, and a random
 * horizontal flip.  No rotation is applied.
 *
 * \param n          number of images loaded by THIS call — the per-thread
 *                   share of the batch (e.g. 128 images / 8 threads = 16)
 * \param paths      array of all training image paths; n are sampled
 *                   randomly (with replacement) from it
 * \param m          number of entries in paths (total training images)
 * \param w, h       network input width and height
 * \param boxes      maximum number of boxes used per image; see
 *                   fill_truth_detection()
 * \param classes    class count, forwarded to fill_truth_detection()
 * \param jitter     strength of the aspect/scale jitter (larger = wilder)
 * \param hue        maximum hue shift in HSV space; the actual shift is
 *                   uniform in [-hue, hue]
 * \param saturation maximum saturation scaling factor
 * \param exposure   maximum value/brightness scaling factor
 *
 * \return data bundle holding the n augmented images (d.X) and their
 *         corrected labels (d.y, 5 floats per box)
 */
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes,
int classes, float jitter, float hue, float saturation, float exposure)
{
/* sample n random image paths; the array is owned and freed locally */
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, 5*boxes);
/* load each image into d.X.vals and its labels into d.y.vals */
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
/* gray canvas the jittered image is placed onto */
image sized = make_image(w, h, orig.c);
fill_image(sized, .5);
float dw = jitter * orig.w;
float dh = jitter * orig.h;
float new_ar = (orig.w + rand_uniform(-dw, dw)) /
(orig.h + rand_uniform(-dh, dh)); // jittered aspect ratio
float scale = rand_uniform(.25, 2); // jittered scale factor
float nw, nh;
if(new_ar < 1){
nh = scale * h;
nw = nh * new_ar;
} else {
nw = scale * w;
nh = nw / new_ar;
}
float dx = rand_uniform(0, w - nw); // random horizontal placement
float dy = rand_uniform(0, h - nh); // random vertical placement
/* translate: place the rescaled image onto the canvas */
place_image(orig, nw, nh, dx, dy, sized);
/* HSV color-space distortion */
random_distort_image(sized, hue, saturation, exposure);
/* random horizontal flip */
int flip = rand()%2;
if(flip) flip_image(sized);
d.X.vals[i] = sized.data;
/* load the labels and correct the boxes for the augmentation above */
fill_truth_detection(random_paths[i], boxes, d.y.vals[i],
classes, flip, -dx/w, -dy/h, nw/w, nh/h);
free_image(orig);
}
free(random_paths);
return d;
}
/* Worker entry point: copy the heap-allocated load_args, dispatch on the
   requested data type to the matching loader, store the result where the
   args point, then free the args.  Always returns 0. */
void *load_thread(void *ptr)
{
    //printf("Loading data: %d\n", rand());
    load_args a = *(struct load_args*)ptr;
    /* zero means "unset" for these augmentation factors; 1 is the identity */
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;
    switch(a.type){
        case OLD_CLASSIFICATION_DATA:
            *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
            break;
        case REGRESSION_DATA:
            *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case CLASSIFICATION_DATA:
            *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
            break;
        case SUPER_DATA:
            *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
            break;
        case WRITING_DATA:
            *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
            break;
        case INSTANCE_DATA:
            *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case SEGMENTATION_DATA:
            *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
            break;
        case REGION_DATA:
            *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
            break;
        case DETECTION_DATA:
            *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
            break;
        case SWAG_DATA:
            *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
            break;
        case COMPARE_DATA:
            *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
            break;
        case IMAGE_DATA:
            *(a.im) = load_image_color(a.path, 0, 0);
            *(a.resized) = resize_image(*(a.im), a.w, a.h);
            break;
        case LETTERBOX_DATA:
            *(a.im) = load_image_color(a.path, 0, 0);
            *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
            break;
        case TAG_DATA:
            *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        default:
            break;  /* unknown type: nothing to load, mirrors the original */
    }
    free(ptr);
    return 0;
}
/* Start one loader thread running load_thread() over a private heap copy
   of 'args' (the thread frees the copy).  Aborts via error() when thread
   creation fails; caller joins the returned handle. */
pthread_t load_data_in_thread(load_args args)
{
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    *copy = args;
    pthread_t thread;
    if(pthread_create(&thread, 0, load_thread, copy) != 0){
        error("Thread creation failed");
    }
    return thread;
}
/*
* \brief: 开辟多个线程读入图片数据,读入数据存储至 ptr.d 中(主要调用
* load_in_thread() 函数完成)
*
* \param: ptr: 包含所有线程要读入图片数据的信息( 如: 读入多少张, 开几个线程读入,
* 读入图片最终的宽高, 图片路径等等 )
*
* 流程: 本函数首先会获取要读入图片的张数、要开启线程的个数, 而后计算每个线程应该读入的
* 图片张数(尽可能的均匀分配). 之后创建所有的线程,并行读入数据,最后合并每个线程
* 读入的数据至一个大 data 中,这个 data 的指针变量与 ptr 的指针变量
* 指向的是统一块内存, 因此也就最终将数据读入到 ptr.d 中(因此函数没有返回值)
*/
/*
 * Load a batch of data with several worker threads.
 *
 * ptr points to a heap-allocated load_args describing the whole job
 * (total image count, thread count, output size, paths, ...).  Each
 * worker loads an even share of the images into a private buffer; the
 * buffers are then concatenated into *args.d, which aliases the
 * caller's output struct, so nothing useful is returned.
 */
void *load_threads(void *ptr)
{
    int i;
    /* args is a by-value copy of *ptr, but the pointers inside it still
     * refer to the same shared storage (paths, labels, output d, ...) */
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n; /* total number of samples to load */
    free(ptr);
    /* one private buffer per worker thread; merged after the joins */
    data *buffers = calloc(args.threads, sizeof(data));
    pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        /* split `total` as evenly as possible even when not divisible */
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    /* concat reused the row pointers, which *out now owns; free only
     * the per-buffer pointer arrays (shallow free) */
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}
/* Synchronous variant of load_data(): runs load_thread() in the calling
 * thread. The heap copy of args is freed inside load_thread(). */
void load_data_blocking(load_args args)
{
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    load_thread(ptr);
}
/* Asynchronously load a batch described by args using args.threads
 * workers (see load_threads()). Join the returned thread before using
 * *args.d. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}
/* Load image/label pairs for a pixel-wise task: X comes from the images
 * at `paths` resized to w x h, y from their grayscale "-label.png"
 * companions resized to out_w x out_h. With m > 0, an n-element random
 * sample of the m paths is drawn first. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths); /* frees only the sampled pointer array, not the strings */
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}
/* Basic classification loader: X = images resized to w x h, y = one-hot
 * labels over k classes matched from the file paths. With m > 0, an
 * n-element random sample of the m paths is used. */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0);
    if(m) free(paths);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/* Build super-resolution training pairs: y is a random
 * (w*scale) x (h*scale) crop of each image, X is that same crop
 * downscaled to w x h. A random horizontal flip is applied before the
 * downscale. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    int i;
    d.X.rows = n;
    d.X.vals = calloc(n, sizeof(float*));
    d.X.cols = w*h*3;
    d.y.rows = n;
    d.y.vals = calloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = rand()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        /* d takes ownership of both pixel buffers; only im is freed here */
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }
    if(m) free(paths);
    return d;
}
/* Regression loader: X = randomly augmented images, y = k regression
 * targets per image read via load_regression_labels_paths(). */
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_regression_labels_paths(paths, n, k);
    if(m) free(paths);
    return d;
}
/* Build a shallow gather of rows from an array of data structs: row i of
 * the result is row i of orig[inds[i]]. Shapes are taken from orig[0];
 * no row data is copied (shallow = 1).
 * NOTE(review): d.y.rows is set from orig[0].X.rows, not orig[0].y.rows --
 * harmless only if the two always match; confirm against callers. */
data select_data(data *orig, int *inds)
{
    data d = {0};
    d.shallow = 1;
    d.w = orig[0].w;
    d.h = orig[0].h;
    d.X.rows = orig[0].X.rows;
    d.y.rows = orig[0].X.rows;
    d.X.cols = orig[0].X.cols;
    d.y.cols = orig[0].y.cols;
    d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
    d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
    int i;
    for(i = 0; i < d.X.rows; ++i){
        d.X.vals[i] = orig[inds[i]].X.vals[i];
        d.y.vals[i] = orig[inds[i]].y.vals[i];
    }
    return d;
}
/* Split each image of orig into a divs x divs grid of (possibly
 * overlapping) tiles. Tile (i%divs, i/divs) of every image goes into
 * ds[i]; each tile is (orig.w/divs*size) x (orig.h/divs*size) pixels,
 * centered on its grid cell. Labels are copied unchanged into every
 * tile's d.y. Returns a calloc'd array of divs*divs data structs that
 * the caller owns. */
data *tile_data(data orig, int divs, int size)
{
    data *ds = calloc(divs*divs, sizeof(data));
    int i;
    #pragma omp parallel for
    for(i = 0; i < divs*divs; ++i){
        data d;
        d.shallow = 0;
        d.w = orig.w/divs * size;
        d.h = orig.h/divs * size;
        d.X.rows = orig.X.rows;
        d.X.cols = d.w*d.h*3;
        d.X.vals = calloc(d.X.rows, sizeof(float*));
        d.y = copy_matrix(orig.y);
        /* crop origin depends only on the tile index, so hoist it out of
         * the per-row loop */
        int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
        int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
        /* BUG FIX: j was declared at function scope, making it shared
         * between the outer OpenMP threads and racing the inner loop;
         * it must be private to each outer iteration. */
        int j;
        #pragma omp parallel for
        for(j = 0; j < orig.X.rows; ++j){
            image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
            d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
        }
        ds[i] = d;
    }
    return ds;
}
/* Return a copy of orig with every image resized to w x h (new pixel
 * buffers); labels are deep-copied unchanged. */
data resize_data(data orig, int w, int h)
{
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    int i;
    d.X.rows = orig.X.rows;
    d.X.cols = w*h*3;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.y = copy_matrix(orig.y);
    #pragma omp parallel for
    for(i = 0; i < orig.X.rows; ++i){
        /* float_to_image wraps the existing buffer; resize_image allocates
         * the new one that d takes ownership of */
        image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
        d.X.vals[i] = resize_image(im, w, h).data;
    }
    return d;
}
/* Classification loader with augmentation: X = randomly augmented
 * size x size images, y = one-hot labels over k classes (the label
 * hierarchy, if any, is passed through to load_labels_paths). */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.w=size;
    d.h=size;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy);
    if(m) free(paths);
    return d;
}
/* Tagging loader: X = randomly augmented size x size images, y = k tag
 * targets per image read via load_tags_paths(). */
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.w = size;
    d.h = size;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(paths, n, k);
    if(m) free(paths);
    return d;
}
/* Stack m2 below m1: the result reuses the row pointers of both inputs
 * (a shallow concatenation); only the pointer array is newly allocated.
 * Column count is taken from m1. */
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix out;
    out.cols = m1.cols;
    out.rows = m1.rows + m2.rows;
    out.vals = calloc(out.rows, sizeof(float*));
    int r;
    for(r = 0; r < out.rows; ++r){
        out.vals[r] = (r < m1.rows) ? m1.vals[r] : m2.vals[r - m1.rows];
    }
    return out;
}
/* Stack d2's rows below d1's for both X and y. The result is shallow:
 * it shares row pointers with d1 and d2 and takes w/h from d1. */
data concat_data(data d1, data d2)
{
    data d = {0};
    d.shallow = 1;
    d.X = concat_matrix(d1.X, d2.X);
    d.y = concat_matrix(d1.y, d2.y);
    d.w = d1.w;
    d.h = d1.h;
    return d;
}
/* Concatenate n data structs into one. The result is shallow (row
 * pointers shared with the inputs); each step frees only the previous
 * accumulator's pointer arrays, so pointer-array work is O(n^2) in the
 * number of structs. */
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data new = concat_data(d[i], out);
        free_data(out);
        out = new;
    }
    return out;
}
/* Load a CSV as features X, popping column `target` out and one-hot
 * encoding it into y over k classes. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = truth; /* y takes ownership of the encoded rows */
    d.X = X;
    d.y = y;
    free(truth_1d);
    return d;
}
/* Load one CIFAR-10 batch file (10000 records of 1 label byte followed
 * by 3072 pixel bytes). X gets the pixels scaled to [0,1]; y gets
 * one-hot labels. file_error() is invoked if the file cannot be opened;
 * a truncated file now stops the loop instead of consuming
 * uninitialized bytes. */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        /* BUG FIX: the fread result was ignored, so a short file left
         * bytes[] uninitialized and was silently consumed as data */
        if(fread(bytes, 1, 3073, fp) != 3073){
            fprintf(stderr, "Truncated CIFAR-10 file: %s (record %ld)\n", filename, i);
            break;
        }
        int class = bytes[0];
        if(class < 10) y.vals[i][class] = 1; /* guard against corrupt labels */
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255);
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}
/* Copy n rows sampled uniformly WITH replacement from d into the packed
 * caller-provided buffers X and y (n*cols floats each). */
void get_random_batch(data d, int n, float *X, float *y)
{
    int j;
    for(j = 0; j < n; ++j){
        int index = rand()%d.X.rows;
        memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
        memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
    }
}
/* Copy rows [offset, offset+n) of d into the packed buffers X and (if
 * non-NULL) y. No bounds check: the caller guarantees offset+n does not
 * exceed the row count. */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int j;
    for(j = 0; j < n; ++j){
        int index = offset + j;
        memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
        if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
    }
}
/* Apply label smoothing in place: every target t becomes
 * eps*(1/k) + (1-eps)*t, where k is the number of classes and eps=0.1. */
void smooth_data(data d)
{
    int row, col;
    float scale = 1. / d.y.cols;
    float eps = .1;
    for(row = 0; row < d.y.rows; ++row){
        float *t = d.y.vals[row];
        for(col = 0; col < d.y.cols; ++col){
            t[col] = eps * scale + (1-eps) * t[col];
        }
    }
}
/* Load all five CIFAR-10 training batch files into one 50000-row data
 * set, scale pixels to [0,1], and apply label smoothing. A truncated
 * batch file now stops that file's loop instead of consuming
 * uninitialized bytes. */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;
    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            /* BUG FIX: fread result was ignored (see load_cifar10_data) */
            if(fread(bytes, 1, 3073, fp) != 3073){
                fprintf(stderr, "Truncated CIFAR-10 file: %s (record %d)\n", buff, i);
                break;
            }
            int class = bytes[0];
            if(class < 10) y.vals[i+b*10000][class] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}
/* Load Go training positions: each record is a "row col" move line
 * followed by a 361-character board line ('1' = own stone -> +1,
 * '2' = opponent -> -1, else 0). X holds the boards, y the one-hot
 * move targets. The matrices grow by doubling and are trimmed to the
 * final count. */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row = 0, col = 0; /* initialized: sscanf may fail on a bad line */
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        sscanf(label, "%d %d", &row, &col);
        char *board = fgetl(fp);
        /* BUG FIX: a truncated file (move line with no board line) made
         * fgetl return NULL, which was then dereferenced below */
        if(!board){
            free(label);
            break;
        }
        int index = row*19 + col;
        y.vals[count][index] = 1;
        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);
    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}
/* In-place Fisher-Yates shuffle of the rows of d, keeping each X row
 * paired with its y row. */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        /* BUG FIX: the original drew rand()%i, which never allows row i
         * to stay in place (Sattolo's cycle variant) and therefore does
         * not sample permutations uniformly; Fisher-Yates needs %(i+1). */
        int index = rand()%(i+1);
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;
        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every feature of every row of d.X by s, in place. */
void scale_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        scale_array(d.X.vals[i], d.X.cols, s);
    }
}
/* Add s to every feature of every row of d.X, in place. */
void translate_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        translate_array(d.X.vals[i], d.X.cols, s);
    }
}
/* Deep-copy X and y of d into a new data struct.
 * NOTE(review): boxes is aliased (pointer copy) even though shallow is
 * set to 0 -- a potential double free if both copies are destroyed;
 * confirm the intended ownership with free_data(). */
data copy_data(data d)
{
    data c = {0};
    c.w = d.w;
    c.h = d.h;
    c.shallow = 0;
    c.num_boxes = d.num_boxes;
    c.boxes = d.boxes;
    c.X = copy_matrix(d.X);
    c.y = copy_matrix(d.y);
    return c;
}
/* Normalize each row of d.X in place via normalize_array(). */
void normalize_data_rows(data d)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        normalize_array(d.X.vals[i], d.X.cols);
    }
}
/* Shallow view of partition `part` out of `total` contiguous, near-equal
 * row slices of d. The view's vals arrays point into d's; do not deep
 * free. */
data get_data_part(data d, int part, int total)
{
    data p = {0};
    p.shallow = 1;
    p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
    p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + d.X.rows * part / total;
    p.y.vals = d.y.vals + d.y.rows * part / total;
    return p;
}
/* Shallow sample of `num` rows drawn uniformly WITH replacement from d.
 * Only the pointer arrays are allocated (shallow = 1); the row data is
 * shared with d. */
data get_random_data(data d, int num)
{
    data r = {0};
    r.shallow = 1;
    r.X.rows = num;
    r.y.rows = num;
    r.X.cols = d.X.cols;
    r.y.cols = d.y.cols;
    r.X.vals = calloc(num, sizeof(float *));
    r.y.vals = calloc(num, sizeof(float *));
    int i;
    for(i = 0; i < num; ++i){
        int index = rand()%d.X.rows;
        r.X.vals[i] = d.X.vals[index];
        r.y.vals[i] = d.y.vals[index];
    }
    return r;
}
/* Split d into {train, test} for cross-validation fold `part` of
 * `total`: test = rows [start, end), train = everything else, order
 * preserved. Both halves are shallow views sharing d's row data; the
 * caller owns the returned 2-element array (split[0] = train,
 * split[1] = test). */
data *split_data(data d, int part, int total)
{
    data *split = calloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train;
    data test;
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = calloc(train.X.rows, sizeof(float*));
    test.X.vals = calloc(test.X.rows, sizeof(float*));
    train.y.vals = calloc(train.y.rows, sizeof(float*));
    test.y.vals = calloc(test.y.rows, sizeof(float*));
    /* rows before the test fold */
    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    /* the test fold itself */
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    /* rows after the test fold, shifted down to stay contiguous */
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
|
GB_unaryop__identity_fp64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_uint8
// op(A') function: GB_tran__identity_fp64_uint8
// C type: double
// A type: uint8_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op with typecast: Cx [p] = (double) Ax [p]
// for all p in [0, anz), parallelized across nthreads with a static
// schedule. Returns GrB_NO_VALUE when this kernel is compiled out via
// GB_DISABLE. (Auto-generated file: comments only, code unchanged.)
GrB_Info GB_unop__identity_fp64_uint8
(
    double *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A into C while typecasting uint8_t -> double; the loop body
// is supplied by GB_unaryop_transpose.c (phase 2 of 2), driven by the
// row counts and slice arrays computed by the caller.
// (Auto-generated file: comments only, code unchanged.)
GrB_Info GB_tran__identity_fp64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
overflow.c | void find_satellites(int icen, void *kd) {
int j, k, ii, neighbor[100000], i, cnt;
float dx, dy, dz, theta, prob_ang, vol_corr, prob_rad, grp_lum, p0, range;
float cenDist, bprob, mtot, nsat;
void *set;
int *pch;
double cen[3];
double sat[3];
// check if this galaxy has already been given to a group
if(GAL[icen].psat>0.5)return;
// Use the k-d tree kd to identify the nearest galaxies to the central.
cen[0] = GAL[icen].x;
cen[1] = GAL[icen].y;
cen[2] = GAL[icen].z;
// Nearest neighbour search should go out to about 4*sigma, the velocity dispersion of the SHAMed halo.
// find all galaxies in 3D that are within 4sigma of the velocity dispersion
range = 4*GAL[icen].sigmav/100.0*(1+GAL[icen].redshift)/
sqrt(OMEGA_M*pow(1+GAL[icen].redshift,3.0) + 1-OMEGA_M);
set = kd_nearest_range(kd, cen, range);
// Set now contains the nearest neighbours within a distance range. Grab their info.
// get the list of all neighbors to farm out to multithreads
cnt = 0;
while( !kd_res_end(set)) {
pch = (int*)kd_res_item(set, sat);
neighbor[cnt] = *pch;
kd_res_next(set);
cnt++;
}
mtot = nsat = 0;
#pragma omp parallel private(ii,j,dz,theta,prob_ang,prob_rad,bprob,p0)
{
mtot=0;nsat=0;
#pragma omp for reduction(+:mtot) reduction(+:nsat)
for(ii=0;ii<cnt;++ii) {
j = neighbor[ii];
// Skip if target galaxy is the same as the central (obviously).
if(j == icen)continue;
// skip if the object is more massive than the icen
if(GAL[j].mstellar>=GAL[icen].mstellar)continue;
// Skip if already assigned to a central.
if(GAL[j].psat)continue;
// check if the galaxy is outside the angular radius of the halo
dz = fabs(GAL[icen].redshift - GAL[j].redshift)*SPEED_OF_LIGHT;
theta = angular_separation(GAL[icen].ra,GAL[icen].dec,GAL[j].ra,GAL[j].dec);
if(theta > GAL[icen].theta){
continue;
}
// Now determine the probability of being a satellite
//(both projected onto the sky, and along the line of sight).
prob_ang = radial_probability(GAL[icen].mass, theta, GAL[icen].rad, GAL[icen].theta);
prob_rad = exp(-dz*dz/(2*GAL[icen].sigmav*GAL[icen].sigmav))
*SPEED_OF_LIGHT/(RT2PI*GAL[icen].sigmav);
// set the background level
if(GAL[j].color>0.8)
bprob = BPROB_RED + (log10(GAL[j].mstellar)-9.5)*BPROB_XRED;
else
bprob = BPROB_BLUE + (log10(GAL[j].mstellar)-9.5)*BPROB_XBLUE;
// combine them into the total probability
p0 = (1 - 1/(1 + prob_ang * prob_rad / bprob));
if(p0 < 0){
printf("ZERO %e\n",p0);
p0 = 0;
}
if(p0>0.5){
// this is considered a member of the group
GAL[j].psat = p0;
GAL[j].igrp = icen;
mtot += GAL[j].mstellar;
nsat ++;
}
//GAL[icen].mtot += GAL[j].mstellar;
//GAL[icen].nsat++;
}
}
GAL[icen].mtot += mtot;
GAL[icen].nsat = nsat;
//exit(0);
// Correct for boundary conditions
dz = SPEED_OF_LIGHT* fabs(GAL[icen].redshift - MINREDSHIFT);
vol_corr = 1-(0.5*erfc(dz/(ROOT2*GAL[icen].sigmav)));
GAL[icen].nsat /= vol_corr;
GAL[icen].mtot /= vol_corr;
dz = SPEED_OF_LIGHT* fabs(GAL[icen].redshift - MAXREDSHIFT);
vol_corr = 1-(0.5*erfc(dz/(ROOT2*GAL[j].sigmav)));
GAL[icen].nsat /= vol_corr;
GAL[icen].mtot /= vol_corr;
}
|
seq_multivector.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "seq_multivector.h"
#include "_hypre_utilities.h"
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorCreate
*--------------------------------------------------------------------------*/
hypre_Multivector *
hypre_SeqMultivectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   /* Allocate an empty multivector shell: dimensions are recorded but
    * the data block and the active-index mask are deferred to
    * hypre_SeqMultivectorInitialize(). */
   hypre_Multivector *mvector;
   mvector = (hypre_Multivector *) hypre_MAlloc(sizeof(hypre_Multivector), HYPRE_MEMORY_HOST);
   hypre_MultivectorNumVectors(mvector) = num_vectors;
   hypre_MultivectorSize(mvector) = size;
   hypre_MultivectorOwnsData(mvector) = 1;
   hypre_MultivectorData(mvector) = NULL;
   mvector->num_active_vectors=0;
   mvector->active_indices=NULL;
   return mvector;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorInitialize( hypre_Multivector *mvector )
{
   /* Allocate the size x num_vectors data block if not already set, and
    * build the active-vector mask with every vector active. */
   HYPRE_Int ierr = 0, i, size, num_vectors;

   size = hypre_MultivectorSize(mvector);
   num_vectors = hypre_MultivectorNumVectors(mvector);

   if (NULL==hypre_MultivectorData(mvector))
      hypre_MultivectorData(mvector) =
         (HYPRE_Complex *) hypre_MAlloc(sizeof(HYPRE_Complex)*size*num_vectors, HYPRE_MEMORY_HOST);

   /* now we create a "mask" of "active" vectors; initially all active */
   if (NULL==mvector->active_indices)
   {
      /* BUG FIX: the assignment operator was missing here, which made
       * this statement a syntax error */
      mvector->active_indices = hypre_CTAlloc(HYPRE_Int, num_vectors, HYPRE_MEMORY_HOST);
      for (i=0; i<num_vectors; i++) mvector->active_indices[i] = i;
      mvector->num_active_vectors=num_vectors;
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorSetDataOwner(hypre_Multivector *mvector, HYPRE_Int owns_data)
{
   /* Record whether Destroy() should free the data block. */
   HYPRE_Int ierr=0;
   hypre_MultivectorOwnsData(mvector) = owns_data;
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorDestroy(hypre_Multivector *mvector)
{
   /* Free the data block (only if owned), the active-index mask, and the
    * struct itself. Safe to call with NULL. */
   HYPRE_Int ierr=0;
   if (NULL!=mvector)
   {
      if (hypre_MultivectorOwnsData(mvector) && NULL!=hypre_MultivectorData(mvector))
         hypre_TFree( hypre_MultivectorData(mvector) , HYPRE_MEMORY_HOST);
      if (NULL!=mvector->active_indices)
         hypre_TFree(mvector->active_indices, HYPRE_MEMORY_HOST);
      hypre_TFree(mvector, HYPRE_MEMORY_HOST);
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetMask
* (this routine accepts mask in "zeros and ones format, and converts it to
the one used in the structure "hypre_Multivector")
*-------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorSetMask(hypre_Multivector *mvector, HYPRE_Int * mask)
{
   /* Convert a zeros-and-ones mask over the vectors into the compact
    * active-index list stored in the struct; a NULL mask activates all
    * vectors. Any previous mask is released. */
   HYPRE_Int i, num_vectors = mvector->num_vectors;

   if (mvector->active_indices != NULL) hypre_TFree(mvector->active_indices, HYPRE_MEMORY_HOST);
   /* BUG FIX: the assignment operator was missing here, which made this
    * statement a syntax error */
   mvector->active_indices = hypre_CTAlloc(HYPRE_Int, num_vectors, HYPRE_MEMORY_HOST);

   mvector->num_active_vectors=0;

   if (mask!=NULL)
      for (i=0; i<num_vectors; i++)
      {
         if ( mask[i] )
            mvector->active_indices[mvector->num_active_vectors++]=i;
      }
   else
      for (i=0; i<num_vectors; i++)
         mvector->active_indices[mvector->num_active_vectors++]=i;

   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetConstantValues
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorSetConstantValues(hypre_Multivector *v, HYPRE_Complex value)
{
   /* Set every entry of each active vector to `value`. Fast path: one
    * pass over the whole data block when no mask is in effect. */
   HYPRE_Int i, j, start_offset, end_offset;
   HYPRE_Int size = hypre_MultivectorSize(v);
   HYPRE_Complex *vector_data = hypre_MultivectorData(v);

   if (v->num_active_vectors == v->num_vectors)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < v->num_vectors*size; j++) vector_data[j] = value;
   }
   else
   {
      for (i = 0; i < v->num_active_vectors; i++)
      {
         start_offset = v->active_indices[i]*size;
         end_offset = start_offset+size;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = start_offset; j < end_offset; j++) vector_data[j]= value;
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetRandomValues
*
* returns vector of values randomly distributed between -1.0 and +1.0
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorSetRandomValues(hypre_Multivector *v, HYPRE_Int seed)
{
   /* Fill each active vector with values uniform in [-1, 1], seeded by
    * `seed`. Kept serial on purpose: hypre_Rand() carries hidden state
    * (see the note imported from vector.c below). */
   HYPRE_Int i, j, start_offset, end_offset;
   HYPRE_Int size = hypre_MultivectorSize(v);
   HYPRE_Complex *vector_data = hypre_MultivectorData(v);

   hypre_SeedRand(seed);

   /* comment from vector.c: RDF: threading this loop may cause problems
      because of hypre_Rand() */
   if (v->num_active_vectors == v->num_vectors)
   {
      for (j = 0; j < v->num_vectors*size; j++)
         vector_data[j] = 2.0 * hypre_Rand() - 1.0;
   }
   else
   {
      for (i = 0; i < v->num_active_vectors; i++)
      {
         start_offset = v->active_indices[i]*size;
         end_offset = start_offset+size;
         for (j = start_offset; j < end_offset; j++)
            vector_data[j]= 2.0 * hypre_Rand() - 1.0;
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorCopy
* copies data from x to y
* y should have already been initialized at the same size as x
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorCopy(hypre_Multivector *x, hypre_Multivector *y)
{
   /* Copy the active vectors of x into the corresponding active vectors
    * of y (y must already be initialized at the same size). Fast path:
    * when both sides have all vectors active, copy the whole block.
    * NOTE(review): num_bytes actually holds an element count, not a
    * byte count -- the Memcpy macros take counts; the name is historic. */
   HYPRE_Int i, size, num_bytes, num_active_vectors, *x_active_ind, * y_active_ind;
   HYPRE_Complex *x_data, *y_data, *dest, * src;

   hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors);

   num_active_vectors = x->num_active_vectors;
   size = x->size;
   x_data = x->data;
   y_data = y->data;
   x_active_ind=x->active_indices;
   y_active_ind=y->active_indices;

   if (x->num_active_vectors == x->num_vectors &&
       y->num_active_vectors == y->num_vectors)
   {
      num_bytes = x->num_vectors * size;
      hypre_TMemcpy(y_data, x_data, HYPRE_Complex, num_bytes, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   }
   else
   {
      num_bytes = size;
      for (i=0; i < num_active_vectors; i++)
      {
         src=x_data + size * x_active_ind[i];
         dest = y_data + size * y_active_ind[i];
         /* NOTE(review): hypre_Memcpy here vs hypre_TMemcpy above --
          * confirm both macros exist with the same signature */
         hypre_Memcpy(dest, src, HYPRE_Complex, num_bytes, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      }
   }
   return 0;
}
HYPRE_Int
hypre_SeqMultivectorCopyWithoutMask(hypre_Multivector *x ,
                                    hypre_Multivector *y)
{
   /* Copy ALL vectors of x into y, ignoring any active-index mask.
    * (byte_count is an element count; the Memcpy macro takes counts.) */
   HYPRE_Int byte_count;

   hypre_assert (x->size == y->size && x->num_vectors == y->num_vectors);

   byte_count = x->size * x->num_vectors;

   hypre_Memcpy(y->data, x->data, HYPRE_Complex, byte_count, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorAxpy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorAxpy(HYPRE_Complex alpha, hypre_Multivector *x,
                         hypre_Multivector *y)
{
   /* y(active) += alpha * x(active). Fast path: when both sides have all
    * vectors active, operate on the full data blocks in one loop. */
   HYPRE_Int i, j, size, num_active_vectors, *x_active_ind, *y_active_ind;
   HYPRE_Complex *x_data, *y_data, *src, *dest;

   hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors);

   x_data = x->data;
   y_data = y->data;
   size = x->size;
   num_active_vectors = x->num_active_vectors;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;

   if (x->num_active_vectors == x->num_vectors &&
       y->num_active_vectors == y->num_vectors)
   {
      /* BUG FIX: this branch used src/dest, which are uninitialized
       * here; the whole x and y data blocks are the intended operands */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < x->num_vectors*size; i++) y_data[i] += alpha * x_data[i];
   }
   else
   {
      for(i = 0; i < num_active_vectors; i++)
      {
         src = x_data + x_active_ind[i]*size;
         dest = y_data + y_active_ind[i]*size;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < size; j++) dest[j] += alpha * src[j];
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorByDiag: " y(<y_mask>) = alpha(<mask>) .* x(<x_mask>) "
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorByDiag(hypre_Multivector *x, HYPRE_Int *mask, HYPRE_Int n,
                           HYPRE_Complex *alpha, hypre_Multivector *y)
{
   /* y(active) = diag(alpha(mask)) * x(active): scale the i-th active
    * vector of x by the i-th masked entry of alpha into y. */
   HYPRE_Int i, j, size, num_active_vectors, *x_active_ind, *y_active_ind;
   HYPRE_Int *al_active_ind, num_active_als;
   HYPRE_Complex *x_data, *y_data, *dest, *src, current_alpha;

   hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors);

   /* build list of active indices in alpha (NULL mask = all active) */
   al_active_ind = hypre_TAlloc(HYPRE_Int, n, HYPRE_MEMORY_HOST);
   num_active_als = 0;
   if (mask!=NULL)
      for (i=0; i<n; i++)
      {
         if (mask[i])
            al_active_ind[num_active_als++]=i;
      }
   else
      for (i=0; i<n; i++)
         al_active_ind[num_active_als++]=i;

   hypre_assert (num_active_als==x->num_active_vectors);

   x_data = x->data;
   y_data = y->data;
   size = x->size;
   num_active_vectors = x->num_active_vectors;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;

   for(i = 0; i < num_active_vectors; i++)
   {
      src = x_data + x_active_ind[i]*size;
      dest = y_data + y_active_ind[i]*size;
      current_alpha=alpha[ al_active_ind[i] ];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < size; j++)
         dest[j] = current_alpha*src[j];
   }
   hypre_TFree(al_active_ind, HYPRE_MEMORY_HOST);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorInnerProd
*--------------------------------------------------------------------------*/
/* Dense Gram-style inner products between the active vectors of x and y:
 * results[i + j*x_num_active] = <x_i, conj(y_j)>, written column-wise
 * into a caller-provided contiguous array of
 * x_num_active_vectors * y_num_active_vectors reals. */
HYPRE_Int hypre_SeqMultivectorInnerProd(hypre_Multivector *x, hypre_Multivector *y,
                                        HYPRE_Real *results )
{
   HYPRE_Int i, j, k, size, *x_active_ind, *y_active_ind;
   HYPRE_Int x_num_active_vectors, y_num_active_vectors;
   HYPRE_Complex *x_data, *y_data, *y_ptr, *x_ptr;
   HYPRE_Real current_product;

   hypre_assert (x->size==y->size);

   x_data = x->data;
   y_data = y->data;
   size = x->size;
   x_num_active_vectors = x->num_active_vectors;
   y_num_active_vectors = y->num_active_vectors;

   /* we assume that "results" points to contiguous array of (x_num_active_vectors X
      y_num_active_vectors) doubles */
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;

   for(j = 0; j < y_num_active_vectors; j++)
   {
      y_ptr = y_data + y_active_ind[j]*size;
      for (i = 0; i < x_num_active_vectors; i++)
      {
         x_ptr = x_data + x_active_ind[i]*size;
         current_product = 0.0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
#endif
         for(k = 0; k < size; k++)
            current_product += x_ptr[k] * hypre_conj(y_ptr[k]);
         /* column-wise storage for results */
         *results++ = current_product;
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorInnerProdDiag
*--------------------------------------------------------------------------*/
/* Pairwise inner products only: diagResults[i] = <x_i, conj(y_i)> for
 * each pair of corresponding active vectors. */
HYPRE_Int hypre_SeqMultivectorInnerProdDiag(hypre_Multivector *x,
                                            hypre_Multivector *y, HYPRE_Real *diagResults)
{
   HYPRE_Complex *x_data, *y_data, *y_ptr, *x_ptr;
   HYPRE_Real current_product;
   HYPRE_Int i, k, size, num_active_vectors, *x_active_ind, *y_active_ind;

   hypre_assert(x->size==y->size && x->num_active_vectors == y->num_active_vectors);

   x_data = x->data;
   y_data = y->data;
   size = x->size;
   num_active_vectors = x->num_active_vectors;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;

   for (i=0; i<num_active_vectors; i++)
   {
      x_ptr = x_data + x_active_ind[i]*size;
      y_ptr = y_data + y_active_ind[i]*size;
      current_product = 0.0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
#endif
      for(k=0; k<size; k++)
         current_product += x_ptr[k] * hypre_conj(y_ptr[k]);
      *diagResults++ = current_product;
   }
   return 0;
}
HYPRE_Int
hypre_SeqMultivectorByMatrix(hypre_Multivector *x, HYPRE_Int rGHeight, HYPRE_Int rHeight,
                             HYPRE_Int rWidth, HYPRE_Complex* rVal, hypre_Multivector *y)
{
   /* y(active) = x(active) * R, where R is an rHeight x rWidth block of
    * coefficients stored column-major inside an array with leading
    * dimension rGHeight (hence the `gap` skip between columns). */
   HYPRE_Int i, j, k, size, gap, *x_active_ind, *y_active_ind;
   HYPRE_Complex *x_data, *y_data, *x_ptr, *y_ptr, current_coef;

   hypre_assert(rHeight>0);
   hypre_assert (rHeight==x->num_active_vectors && rWidth==y->num_active_vectors);

   x_data = x->data;
   y_data = y->data;
   size = x->size;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   gap = rGHeight - rHeight;

   for (j=0; j<rWidth; j++)
   {
      y_ptr = y_data + y_active_ind[j]*size;

      /* ------ set current "y" to first member in a sum ------ */
      x_ptr = x_data + x_active_ind[0]*size;
      current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
      for (k=0; k<size; k++)
         y_ptr[k] = current_coef * x_ptr[k];

      /* ------ now add all other members of a sum to "y" ----- */
      for (i=1; i<rHeight; i++)
      {
         x_ptr = x_data + x_active_ind[i]*size;
         current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
         for (k=0; k<size; k++)
            y_ptr[k] += current_coef * x_ptr[k];
      }
      rVal += gap; /* skip the unused tail of this column of R */
   }
   return 0;
}
/* Accumulates y += x * r over the active vectors, where r is an
 * rHeight x rWidth coefficient block stored column-major in rVal with
 * leading dimension rGHeight (the unused tail of each column is skipped). */
HYPRE_Int
hypre_SeqMultivectorXapy (hypre_Multivector *x, HYPRE_Int rGHeight, HYPRE_Int rHeight,
                          HYPRE_Int rWidth, HYPRE_Complex* rVal, hypre_Multivector *y)
{
   HYPRE_Int col, row, k;
   HYPRE_Int len, skip;
   HYPRE_Int *x_idx, *y_idx;
   HYPRE_Complex *src, *dst, coef;

   hypre_assert (rHeight == x->num_active_vectors && rWidth == y->num_active_vectors);

   len = x->size;
   x_idx = x->active_indices;
   y_idx = y->active_indices;
   skip = rGHeight - rHeight;   /* unused tail of each stored column */

   for (col = 0; col < rWidth; col++)
   {
      dst = y->data + y_idx[col] * len;
      for (row = 0; row < rHeight; row++)
      {
         src = x->data + x_idx[row] * len;
         coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
         for (k = 0; k < len; k++)
            dst[k] += coef * src[k];
      }
      rVal += skip;
   }
   return 0;
}
|
test72.c | void foo();
/* Assigns to a local around a call to foo() and an explicit barrier.
 * NOTE(review): this file looks like a compiler-testsuite input exercising
 * "#pragma omp barrier" placement across a call chain — preserve the exact
 * statement layout. */
void max () {
    int z = 0;
    z = 10;
    foo();
    z = 33;
#pragma omp barrier
    z = 15;
}
/* "if (2)" is constant-true, so max() is always called and the barrier in
 * the else-branch is statically present but never reached at runtime. */
void bar() {
    int y;
    if (2) {
        y = 6;
        max();
    } else {
#pragma omp barrier
    }
}
/* Calls bar() and then hits a barrier on each branch; "if (1)" is
 * constant-true so only the first branch executes at runtime. */
void foo () {
    int x = 0;
    if (1) {
        bar();
#pragma omp barrier
        x = 4;
    } else {
        bar();
#pragma omp barrier
        x = 3;
    }
}
/* Entry point: runs foo() (and its barrier-containing call chain) inside an
 * OpenMP parallel region. */
int main() {
#pragma omp parallel
    foo();
}
|
gpg_fmt_plug.c | /* GPG cracker patch for JtR. Hacked together during Monsoon of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com> .
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and is based on,
*
* pgpry - PGP private key recovery
* Copyright (C) 2010 Jonas Gehring
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>
*
* converted to use 'common' code, Feb29-Mar1 2016, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_gpg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_gpg);
#else
#include <string.h>
#include <assert.h>
#include "twofish.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "md5.h"
#include "rc4.h"
#include "pdfcrack_md5.h"
#include "sha.h"
#include "sha2.h"
#include "stdint.h"
#include "gpg_common.h"
#include "memdbg.h"
#define FORMAT_LABEL "gpg"
#define FORMAT_NAME "OpenPGP / GnuPG Secret Key"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define SALT_SIZE sizeof(struct gpg_common_custom_salt*)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static int any_cracked;
static size_t cracked_size;
/* One-time format setup: scales the key counts for OpenMP, allocates the
 * candidate-key and cracked-flag buffers, and initializes Twofish tables. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	Twofish_initialise();

	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;

	saved_key = mem_calloc_align(sizeof(*saved_key),
	                             self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	cracked = mem_calloc_align(sizeof(*cracked),
	                           self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Frees the per-session buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}
/* Installs the salt for the next crypt_all() batch; with FMT_DYNA_SALT the
 * salt blob holds a pointer to the actual gpg_common_custom_salt. */
static void set_salt(void *salt)
{
	gpg_common_cur_salt = *(struct gpg_common_custom_salt **)salt;
}
static void gpg_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Returns the stored candidate password for the given index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Derives a key from each queued candidate password via the salt's S2K
 * function and tests it with gpg_common_check().  Matches are recorded in
 * cracked[] and any_cracked.  Returns the number of candidates processed.
 *
 * BUG FIX: the for statement must sit outside the #ifdef.  Previously the
 * #endif came after the loop header, so a build without OpenMP compiled only
 * the braced body and tested just one candidate (index 0).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	int ks = gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm);

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		int res;
		unsigned char keydata[64];

		gpg_common_cur_salt->s2kfun(saved_key[index], keydata, ks);
		res = gpg_common_check(keydata, ks);
		if (res) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
/* Returns nonzero if any candidate in the last crypt_all() batch matched. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Returns nonzero if the candidate at this index matched in crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Always succeeds: crypt_all() already did the full check via
 * gpg_common_check(), so no further per-candidate verification is done. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
 * Format descriptor registered with John the Ripper; wires the gpg_common
 * helpers and the local init/crypt/cmp callbacks into the fmt_main table.
 */
struct fmt_main fmt_gpg = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT,
		{
			/* tunable-cost names reported to the user */
			"s2k-count", /* only for gpg --s2k-mode 3, see man gpg, option --s2k-count n */
			"hash algorithm [1:MD5 2:SHA1 3:RIPEMD160 8:SHA256 9:SHA384 10:SHA512 11:SHA224]",
			"cipher algorithm [1:IDEA 2:3DES 3:CAST5 4:Blowfish 7:AES128 8:AES192 9:AES256 10:Twofish 11:Camellia128 12:Camellia192 13:Camellia256]",
		},
		{ FORMAT_TAG },
		gpg_common_gpg_tests
	},
	{
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		gpg_common_valid,
		fmt_default_split,
		fmt_default_binary,
		gpg_common_get_salt,
		{
			/* tunable-cost callbacks, same order as the names above */
			gpg_common_gpg_s2k_count,
			gpg_common_gpg_hash_algorithm,
			gpg_common_gpg_cipher_algorithm,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		gpg_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
tagFromPhasedVcf.c | /*
* Copyright (C) 2018 by Benedict Paten (benedictpaten@gmail.com)
*
* Released under the MIT license, see LICENSE.txt
*/
#include <getopt.h>
#include <stdio.h>
#include <ctype.h>
#include <memory.h>
#include <hashTableC.h>
#include <unistd.h>
#include <time.h>
#include "marginVersion.h"
#include "margin.h"
#include "htsIntegration.h"
#include "helenFeatures.h"
/*
* Main functions
*/
/* Prints the command-line help text for tagFromPhasedVcf to stderr. */
void usage() {
    fprintf(stderr, "usage: tagFromPhasedVcf <ALIGN_BAM> <REFERENCE_FASTA> <VARIANT_VCF> <PARAMS> [options]\n");
    fprintf(stderr, "Version: %s \n\n", MARGIN_POLISH_VERSION_H);
    fprintf(stderr, "Tags reads in ALIGN_BAM using already-phased variants in VARIANT_VCF.\n");
    fprintf(stderr, " This tool does not attempt to phase variants!\n");
    fprintf(stderr, " This tool does not have phaseset awareness, so the VCF should avoid overlapping phasesets\n");
    fprintf(stderr, "\nRequired arguments:\n");
    fprintf(stderr, " ALIGN_BAM is the alignment of reads to the reference.\n");
    fprintf(stderr, " REFERENCE_FASTA is the reference sequence BAM file in fasta format.\n");
    fprintf(stderr, " VARIANT_VCF is the set of variants to use for phasing.\n");
    fprintf(stderr, " PARAMS is the file with margin parameters.\n");
    fprintf(stderr, "\nDefault options:\n");
    fprintf(stderr, " -h --help : Print this help screen\n");
    fprintf(stderr, " -a --logLevel : Set the log level [default = info]\n");
# ifdef _OPENMP
    /* threads option only exists in OpenMP builds */
    fprintf(stderr, " -t --threads : Set number of concurrent threads [default = 1]\n");
#endif
    fprintf(stderr, " -o --outputBase : Name to use for output files [default = 'output']\n");
    fprintf(stderr, " -r --region : If set, will only compute for given chromosomal region\n");
    fprintf(stderr, " Format: chr:start_pos-end_pos (chr3:2000-3000)\n");
    fprintf(stderr, " -k --tempFilesToDisk : Write temporary files to disk (for --diploid or supplementary output)\n");
    fprintf(stderr, "\n");
}
/*
 * tagFromPhasedVcf: haplotags reads in a BAM using already-phased variants
 * from a VCF.  Parses arguments, chunks the BAM over the VCF contigs,
 * partitions each chunk's reads by haplotype via bubble graphs built from
 * the phased VCF entries, stitches the chunks, and writes haplotagged BAMs.
 *
 * BUG FIXES in this revision:
 *  - the positional-argument guard was "argc < 4" although argv[1]..argv[4]
 *    are read, so a 3-argument invocation dereferenced argv[4] == NULL;
 *  - the BAM-index (.bai) existence check was nested inside the
 *    unreadable-BAM branch after st_errAbort, i.e. dead code; it now runs
 *    unconditionally.
 */
int main(int argc, char *argv[]) {

    // Parameters / arguments
    char *logLevelString = stString_copy("critical");
    char *bamInFile = NULL;
    char *paramsFile = NULL;
    char *referenceFastaFile = NULL;
    char *outputBase = stString_copy("output");
    char *regionStr = NULL;
    char *vcfFile = NULL;
    int numThreads = 1;
    bool inMemory = TRUE;

    // Four positional arguments (argv[1]..argv[4]) are required below.
    if (argc < 5) {
        free(outputBase);
        free(logLevelString);
        usage();
        return 0;
    }

    bamInFile = stString_copy(argv[1]);
    referenceFastaFile = stString_copy(argv[2]);
    vcfFile = stString_copy(argv[3]);
    paramsFile = stString_copy(argv[4]);

    // Parse the options
    while (1) {
        static struct option long_options[] = {
                { "help", no_argument, 0, 'h' },
                { "logLevel", required_argument, 0, 'a' },
# ifdef _OPENMP
                { "threads", required_argument, 0, 't'},
#endif
                { "outputBase", required_argument, 0, 'o'},
                { "region", required_argument, 0, 'r'},
                { "tempFilesToDisk", no_argument, 0, 'k'},
                { 0, 0, 0, 0 } };

        int option_index = 0;
        // NOTE(review): option parsing starts at &argv[2] although four
        // positional arguments precede the options — presumably getopt's
        // argument permutation skips the remaining positionals; verify
        // against how this binary is invoked before changing.
        int key = getopt_long(argc-2, &argv[2], "ha:t:o:r:k", long_options, &option_index);

        if (key == -1) {
            break;
        }
        switch (key) {
        case 'a':
            free(logLevelString);
            logLevelString = stString_copy(optarg);
            break;
        case 'h':
            usage();
            return 0;
        case 'o':
            free(outputBase);
            outputBase = getFileBase(optarg, "output");
            break;
        case 'r':
            regionStr = stString_copy(optarg);
            break;
        case 't':
            numThreads = atoi(optarg);
            if (numThreads <= 0) {
                st_errAbort("Invalid thread count: %d", numThreads);
            }
            break;
        case 'k':
            inMemory = FALSE;
            break;
        default:
            usage();
            free(outputBase);
            free(logLevelString);
            free(bamInFile);
            free(referenceFastaFile);
            free(paramsFile);
            return 0;
        }
    }

    // sanity check (verify files exist)
    if (access(bamInFile, R_OK) != 0) {
        st_errAbort("Could not read from input bam file: %s\n", bamInFile);
    }
    // the index check now runs unconditionally (was dead code, see header)
    char *idx = stString_print("%s.bai", bamInFile);
    if (access(idx, R_OK) != 0) {
        st_errAbort("BAM does not appear to be indexed: %s\n", bamInFile);
    }
    free(idx);
    if (access(referenceFastaFile, R_OK) != 0) {
        st_errAbort("Could not read from reference fastafile: %s\n", referenceFastaFile);
    }
    if (access(vcfFile, R_OK) != 0) {
        st_errAbort("Could not read from vcf file: %s\n", vcfFile);
    }
    if (access(paramsFile, R_OK) != 0) {
        st_errAbort("Could not read from params file: %s\n", paramsFile);
    }

    // Initialization from arguments
    time_t startTime = time(NULL);
    st_setLogLevelFromString(logLevelString);
    free(logLevelString);
    if (st_getLogLevel() >= info) {
        st_setCallocDebug(true);
    }
# ifdef _OPENMP
    if (numThreads <= 0) {
        numThreads = 1;
    }
    omp_set_num_threads(numThreads);
    st_logCritical("Running OpenMP with %d threads.\n", omp_get_max_threads());
# endif

    // Parse parameters
    st_logCritical("> Parsing model parameters from file: %s\n", paramsFile);
    Params *params = params_readParams(paramsFile);

    // Print a report of the parsed parameters
    if (st_getLogLevel() == debug) {
        params_printParameters(params, stderr);
    }

    // get vcf entries (if set)
    stHash *vcfEntries = NULL;
    if (vcfFile != NULL) {
        vcfEntries = parseVcf2(vcfFile, regionStr, params);
    }

    // get valid contigs (to help bam chunker construction)
    stList *vcfContigsTmp = stHash_getKeys(vcfEntries);
    stSet *vcfContigs = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
    for (int64_t i = 0; i < stList_length(vcfContigsTmp); i++) {
        stSet_insert(vcfContigs, stList_get(vcfContigsTmp, i));
    }

    // get chunker for bam. if regionStr is NULL, it will be ignored
    time_t chunkingStart = time(NULL);
    BamChunker *bamChunker = bamChunker_construct2(bamInFile, regionStr, vcfContigs, params->polishParams, TRUE);
    char *regionStrInformative = regionStr != NULL ? stString_copy(regionStr) : stString_join2(",", vcfContigsTmp);
    st_logCritical(
            "> Set up bam chunker in %"PRId64"s with chunk size %i and overlap %i (for region=%s), resulting in %i total chunks\n",
            time(NULL) - chunkingStart, (int) bamChunker->chunkSize, (int) bamChunker->chunkBoundary,
            regionStrInformative, bamChunker->chunkCount);
    if (bamChunker->chunkCount == 0) {
        st_errAbort("> Found no valid reads!\n");
    }
    free(regionStrInformative);
    stList_destruct(vcfContigsTmp);
    stSet_destruct(vcfContigs);

    // output chunker tracks intermediate output files
    OutputChunkers *outputChunkers = outputChunkers_construct(numThreads, params, NULL, NULL, NULL, NULL,
                                                              ".hap1", ".hap2", inMemory);

    // (may) need to shuffle chunks
    stList *chunkOrder = stList_construct3(0, (void (*)(void *)) stIntTuple_destruct);
    for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
        stList_append(chunkOrder, stIntTuple_construct1(i));
    }
    if (params->polishParams->shuffleChunks) {
        switch (params->polishParams->shuffleChunksMethod) {
        case SCM_SIZE_DESC:
            st_logCritical("> Ordering chunks by estimated depth\n");
            stList_sort2(chunkOrder, compareBamChunkDepthByIndexInList, bamChunker->chunks);
            stList_reverse(chunkOrder);
            break;
        case SCM_RANDOM:
            st_logCritical("> Randomly shuffling chunks\n");
            stList_shuffle(chunkOrder);
            break;
        }
    }

    // multiproccess the chunks, save to results
    st_logCritical("> Setup complete, beginning run\n");
    int64_t lastReportedPercentage = 0;
    time_t polishStartTime = time(NULL);

# ifdef _OPENMP
#pragma omp parallel for schedule(dynamic,1)
# endif
    for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
        int64_t chunkIdx = stIntTuple_get(stList_get(chunkOrder, i), 0);
        // Time all chunks
        time_t chunkStartTime = time(NULL);

        // Get chunk
        BamChunk *bamChunk = bamChunker_getChunk(bamChunker, chunkIdx);

        // logging
        char *logIdentifier;
        bool logProgress = FALSE;
        int64_t currentPercentage = (int64_t) (100 * i / bamChunker->chunkCount);
# ifdef _OPENMP
        int64_t threadIdx = omp_get_thread_num();
        logIdentifier = stString_print(" T%02d_C%05"PRId64, threadIdx, chunkIdx);
        if (threadIdx == 0) {
            if (currentPercentage != lastReportedPercentage) {
                logProgress = TRUE;
                lastReportedPercentage = currentPercentage;
            }
        }
# else
        int64_t threadIdx = 0;
        logIdentifier = stString_copy("");
        if (currentPercentage != lastReportedPercentage) {
            logProgress = TRUE;
            lastReportedPercentage = currentPercentage;
        }
# endif

        // prints percentage complete and estimated time remaining
        if (logProgress) {
            // log progress
            int64_t timeTaken = (int64_t) (time(NULL) - polishStartTime);
            int64_t secondsRemaining = (int64_t) floor(1.0 * timeTaken / currentPercentage * (100 - currentPercentage));
            char *timeDescriptor = (secondsRemaining == 0 && currentPercentage <= 50 ?
                    stString_print("unknown") : getTimeDescriptorFromSeconds(secondsRemaining));
            st_logCritical("> Polishing %2"PRId64"%% complete (%"PRId64"/%"PRId64"). Estimated time remaining: %s\n",
                    currentPercentage, i, bamChunker->chunkCount, timeDescriptor);
            free(timeDescriptor);
        }

        // Get reference string for chunk of alignment
        char *chunkReference = getSequenceFromReference(referenceFastaFile, bamChunk->refSeqName,
                bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);
        st_logInfo(">%s Going to process a chunk for reference sequence: %s, starting at: %i and ending at: %i\n",
                logIdentifier, bamChunk->refSeqName, (int) bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);

        // get VCF string
        stList *chunkVcfEntries = stList_construct3(0, (void(*)(void*))vcfEntry_destruct);
        stList *filteredChunkVcfEntries = stList_construct3(0, (void(*)(void*))vcfEntry_destruct);
        getVcfEntriesForRegion(vcfEntries, chunkVcfEntries, filteredChunkVcfEntries, NULL, bamChunk->refSeqName,
                bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd, params);
        updateVcfEntriesWithSubstringsAndPositions(chunkVcfEntries, chunkReference, strlen(chunkReference),
                FALSE, params);

        // Convert bam lines into corresponding reads and alignments
        st_logInfo(" %s Parsing input reads from file: %s\n", logIdentifier, bamInFile);
        stList *reads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
        extractReadSubstringsAtVariantPositions(bamChunk, chunkVcfEntries, reads, NULL, params);

        time_t primaryPhasingStart = time(NULL);

        // iteratively find bubbles
        int64_t bubbleFindingIteration = 0;
        BubbleGraph *bg = NULL;
        stSet *readsBelongingToHap1 = stSet_construct(), *readsBelongingToHap2 = stSet_construct();
        stList *vcfEntriesToBubbles = NULL;

        // Get the bubble graph representation
        bg = bubbleGraph_constructFromVCFAndBamChunkReadVcfEntrySubstrings(reads, chunkVcfEntries, params,
                &vcfEntriesToBubbles);
        bubbleGraph_partitionFilteredReadsFromPhasedVcfEntries(reads, bg, vcfEntriesToBubbles, readsBelongingToHap1,
                readsBelongingToHap2, params, logIdentifier);

        // Output
        stGenomeFragment *gF = stGenomeFragment_constructEmpty(NULL, 0, 1,
                stSet_construct3(stHash_stringKey, stHash_stringEqualKey, free),
                stSet_construct3(stHash_stringKey, stHash_stringEqualKey, free));
        outputChunkers_processChunkSequencePhased(outputChunkers, threadIdx, chunkIdx, bamChunk->refSeqName,
                NULL, NULL, reads, readsBelongingToHap1, readsBelongingToHap2, gF,
                params);

        // Cleanup
        stList_destruct(chunkVcfEntries);
        stList_destruct(filteredChunkVcfEntries);
        stSet_destruct(readsBelongingToHap1);
        stSet_destruct(readsBelongingToHap2);
        bubbleGraph_destruct(bg);
        stGenomeFragment_destruct(gF);
        stList_destruct(vcfEntriesToBubbles);
        free(chunkReference);

        // report timing
        if (st_getLogLevel() >= info) {
            st_logInfo(">%s Chunk with ~%"PRId64" reads processed in %d sec\n",
                    logIdentifier, stList_length(reads), (int) (time(NULL) - chunkStartTime));
        }

        // final post-completion logging cleanup
        stList_destruct(reads);
        free(logIdentifier);
    }

    // for writing haplotyped chunks
    stList *allReadIdsHap1 = stList_construct3(0, free);
    stList *allReadIdsHap2 = stList_construct3(0, free);

    // merge chunks
    time_t mergeStartTime = time(NULL);
    st_logCritical("> Starting merge\n");
    outputChunkers_stitchAndTrackExtraData(outputChunkers, TRUE, bamChunker->chunkCount, allReadIdsHap1, allReadIdsHap2,
            NULL);
    time_t mergeEndTime = time(NULL);
    char *tds = getTimeDescriptorFromSeconds((int) mergeEndTime - mergeStartTime);
    st_logCritical("> Merging took %s\n", tds);
    outputChunkers_destruct(outputChunkers);
    free(tds);
    tds = getTimeDescriptorFromSeconds((int) time(NULL) - mergeEndTime);
    st_logCritical("> Merge cleanup took %s\n", tds);
    free(tds);

    // maybe write final haplotyped bams
    // logging
    time_t hapBamStart = time(NULL);
    st_logInfo("> Writing final haplotyped BAMs\n");

    // get all reads
    stSet *allReadIdsForHaplotypingHap1 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
    stSet *allReadIdsForHaplotypingHap2 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
    for (int64_t i = 0; i < stList_length(allReadIdsHap1); i++) {
        stSet_insert(allReadIdsForHaplotypingHap1, stList_get(allReadIdsHap1, i));
    }
    for (int64_t i = 0; i < stList_length(allReadIdsHap2); i++) {
        stSet_insert(allReadIdsForHaplotypingHap2, stList_get(allReadIdsHap2, i));
    }

    // write it
    writeHaplotaggedBam(bamChunker->bamFile, outputBase, regionStr,
            allReadIdsForHaplotypingHap1, allReadIdsForHaplotypingHap2, NULL, params, "");

    // loggit
    char *hapBamTDS = getTimeDescriptorFromSeconds(time(NULL) - hapBamStart);
    st_logCritical("> Wrote haplotyped bams in %s\n", hapBamTDS);

    // cleanup
    free(hapBamTDS);
    stSet_destruct(allReadIdsForHaplotypingHap1);
    stSet_destruct(allReadIdsForHaplotypingHap2);

    // cleanup
    bamChunker_destruct(bamChunker);
    params_destruct(params);
    if (regionStr != NULL) free(regionStr);
    stList_destruct(chunkOrder);
    free(vcfFile);
    stHash_destruct(vcfEntries);
    if (allReadIdsHap1 != NULL) stList_destruct(allReadIdsHap1);
    if (allReadIdsHap2 != NULL) stList_destruct(allReadIdsHap2);
    free(outputBase);
    free(bamInFile);
    free(referenceFastaFile);
    free(paramsFile);

    // log completion
    char *timeDescriptor = getTimeDescriptorFromSeconds(time(NULL) - startTime);
    st_logCritical("> Finished phasing in %s.\n", timeDescriptor);
    free(timeDescriptor);

    // while(1); // Use this for testing for memory leaks

    return 0;
}
|
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Repacks 1x1 convolution weights from the native [outch][inch] layout into
// the interleaved pack4 layout consumed by the SGEMM kernel.  On aarch64,
// output channels are packed 8 at a time (with a 4-wide pass for the
// remainder); elsewhere they are packed 4 at a time.
// NOTE(review): assumes inch and outch are multiples of 4 — the loops only
// advance in steps of 4; confirm against callers.
static void conv1x1s1_sgemm_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4b-4a-inch/4a-outch/4b
#if __aarch64__
    // each destination channel holds 8 output channels; an odd remaining
    // group of 4 gets its own channel ((outch/4)/2 + (outch/4)%2 rows)
    kernel_tm_pack4.create(2 * 1, inch/4, (outch/4)/2 + (outch/4)%2, (size_t)4u*16, 16);
#else
    kernel_tm_pack4.create(1, inch/4, outch/4, (size_t)4u*16, 16);
#endif

    int q=0;
#if __aarch64__
    // pack 8 output channels per pass: for every 4 input channels, write a
    // 4(in) x 8(out) tile of 32 floats, output-channel index fastest
    for (; q+7<outch; q+=8)
    {
        const float* k0 = (const float*)kernel + (q+0)*inch;
        const float* k1 = (const float*)kernel + (q+1)*inch;
        const float* k2 = (const float*)kernel + (q+2)*inch;
        const float* k3 = (const float*)kernel + (q+3)*inch;
        const float* k4 = (const float*)kernel + (q+4)*inch;
        const float* k5 = (const float*)kernel + (q+5)*inch;
        const float* k6 = (const float*)kernel + (q+6)*inch;
        const float* k7 = (const float*)kernel + (q+7)*inch;

        float* g0 = kernel_tm_pack4.channel(q/8);

        for (int p=0; p+3<inch; p+=4)
        {
            g0[0] = k0[0];
            g0[1] = k1[0];
            g0[2] = k2[0];
            g0[3] = k3[0];
            g0[4] = k4[0];
            g0[5] = k5[0];
            g0[6] = k6[0];
            g0[7] = k7[0];

            g0[8] = k0[1];
            g0[9] = k1[1];
            g0[10] = k2[1];
            g0[11] = k3[1];
            g0[12] = k4[1];
            g0[13] = k5[1];
            g0[14] = k6[1];
            g0[15] = k7[1];

            g0[16] = k0[2];
            g0[17] = k1[2];
            g0[18] = k2[2];
            g0[19] = k3[2];
            g0[20] = k4[2];
            g0[21] = k5[2];
            g0[22] = k6[2];
            g0[23] = k7[2];

            g0[24] = k0[3];
            g0[25] = k1[3];
            g0[26] = k2[3];
            g0[27] = k3[3];
            g0[28] = k4[3];
            g0[29] = k5[3];
            g0[30] = k6[3];
            g0[31] = k7[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            k4 += 4;
            k5 += 4;
            k6 += 4;
            k7 += 4;
            g0 += 32;
        }
    }
#endif // __aarch64__
    // pack remaining output channels 4 at a time: 4(in) x 4(out) tiles
    for (; q+3<outch; q+=4)
    {
        const float* k0 = (const float*)kernel + (q+0)*inch;
        const float* k1 = (const float*)kernel + (q+1)*inch;
        const float* k2 = (const float*)kernel + (q+2)*inch;
        const float* k3 = (const float*)kernel + (q+3)*inch;

#if __aarch64__
        // q/8 full 8-wide rows precede, plus one row for this 4-wide group
        float* g0 = kernel_tm_pack4.channel(q/8+(q%8)/4);
#else
        float* g0 = kernel_tm_pack4.channel(q/4);
#endif

        for (int p=0; p+3<inch; p+=4)
        {
            g0[0] = k0[0];
            g0[1] = k1[0];
            g0[2] = k2[0];
            g0[3] = k3[0];

            g0[4] = k0[1];
            g0[5] = k1[1];
            g0[6] = k2[1];
            g0[7] = k3[1];

            g0[8] = k0[2];
            g0[9] = k1[2];
            g0[10] = k2[2];
            g0[11] = k3[2];

            g0[12] = k0[3];
            g0[13] = k1[3];
            g0[14] = k2[3];
            g0[15] = k3[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            g0 += 16;
        }
    }
}
static void conv1x1s1_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
#if __aarch64__
Mat tmp(12, inch, size/12 + (size%12)/8 + (size%12%8)/4 + (size%12%4)/2 + size%12%2, elemsize, elempack, opt.workspace_allocator);
#else
Mat tmp(8, inch, size/8 + (size%8)/4 + (size%4)/2 + size%2, elemsize, elempack, opt.workspace_allocator);
#endif
{
int nn_size;
int remain_size_start;
#if __aarch64__
nn_size = size / 12;
remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i*4;
float* tmpptr = tmp.channel(i/12);
for (int q=0; q<inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%1], #16 \n"
"sub %0, %0, #128 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v11.4s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
img0 += bottom_blob.cstep * 4;
}
}
#else
remain_size_start = 0;
#endif
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
float* tmpptr = tmp.channel(i/12+(i%12)/8);
#else
float* tmpptr = tmp.channel(i/8);
#endif
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"sub %0, %0, #64 \n"
"st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
);
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"pld [%0, #512] \n"
"vldm %0, {d16-d23} \n"
// transpose 8x4
"vtrn.32 q0, q1 \n"
"vtrn.32 q2, q3 \n"
"vtrn.32 q8, q9 \n"
"vtrn.32 q10, q11 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vswp q1, q8 \n"
"vswp q3, q10 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"sub %0, %0, #64 \n"
"vst1.f32 {d4-d7}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
#else
float* tmpptr = tmp.channel(i/8 + (i%8)/4);
#endif
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3"
);
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vstm %1!, {d0-d7} \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2);
#else
float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2);
#endif
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1"
);
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2);
#else
float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2 + i%2);
#endif
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0"
);
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p+1);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i=0;
for (; i+11<size; i+=12)
{
const float* tmpptr = tmp.channel(i/12);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"// w0011_01
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"// w2233_01
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+7<size; i+=8)
{
float* tmpptr = tmp.channel(i/12+(i%12)/8);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<size; i+=4)
{
float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i+1<size; i+=2)
{
float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v1.16b \n"
"mov v19.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"// r0 r1
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
}
for (; i<size; i++)
{
float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%10] \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n"// r0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"
);
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i=0;
#if __aarch64__
for (; i+11<size; i+=12)
{
float* tmpptr = tmp.channel(i/12);
const float* kptr0 = (const float*)kernel.channel(p/2+p%2);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// w0123_0
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#endif
for (; i+7<size; i+=8)
{
#if __aarch64__
float* tmpptr = tmp.channel(i/12+(i%12)/8);
const float* kptr0 = (const float*)kernel.channel(p/2+p%2);
#else
float* tmpptr = tmp.channel(i/8);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"// r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"vmov q12, q0 \n"
"vmov q13, q0 \n"
"vmov q14, q0 \n"
"vmov q15, q0 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
"vstm %1!, {d24-d31} \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif
}
for (; i+3<size; i+=4)
{
#if __aarch64__
float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
const float* kptr0 = (const float*)kernel.channel(p/2+p%2);
#else
float* tmpptr = tmp.channel(i/8 + (i%8)/4);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"
);
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif
}
for (; i+1<size; i+=2)
{
#if __aarch64__
float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2);
const float* kptr0 = (const float*)kernel.channel(p/2+p%2);
#else
float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n"// r0 r1
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"
);
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"
);
#endif
}
for (; i<size; i++)
{
#if __aarch64__
float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2);
const float* kptr0 = (const float*)kernel.channel(p/2+p%2);
#else
float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2 + i%2);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v16.4s}, [%8] \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"// r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"
);
#else
asm volatile(
"vld1.f32 {d16-d17}, [%8] \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// float* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const float* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const float* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2*outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float32x4_t _v = vld1q_f32(r0);
vst1q_f32(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
taskdep_if0.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "omp_my_sleep.h"
int a = 0;
/* Dependence producer: sleeps ~0.5s (my_sleep from omp_my_sleep.h) so the
   consumer task would observe a stale value if the dependence were ignored,
   then stores a = 10. */
void task1() {
  my_sleep(0.5);
  a = 10;
}
/* Dependence consumer: increments the shared counter. Correct only if
   task1's store (a = 10) has completed first, giving a == 11. */
void task2() {
  a++;
}
/* Regression test: an undeferred task (if(0)) with an inout dependence on
   'a' must still wait for the prior task's out dependence, so the final
   value is 10 (task1) + 1 (task2) = 11. */
int main(int argc, char** argv)
{
  #pragma omp parallel shared(argc) num_threads(2)
  {
    #pragma omp single
    {
      #pragma omp task depend(out: a)
      task1();
      #pragma omp task if(0) depend(inout: a)
      task2();
    }
  }

  if (a == 11) {
    printf("pass\n");
    return 0;
  }

  fprintf(stderr, "fail: expected 11, but a is %d\n", a);
  exit(1);
}
|
Convolution_Helper.h | #pragma once
#define CONV_HELPER_ENABLE_SSE
#define CONV_HELPER_ENABLE_OMP 1
#include "ldpMat\ldp_basic_vec.h"

#include <cstring>   // memset / memcpy
#include <stdexcept> // std::invalid_argument

#ifdef CONV_HELPER_ENABLE_OMP
#include <omp.h>
#endif
#ifdef CONV_HELPER_ENABLE_SSE
#include <xmmintrin.h> // __m128 data type and SSE functions
#endif
#pragma push_macro("min")
#pragma push_macro("max")
#undef min
#undef max
namespace conv_helper
{
// 3D volume padding by zeros
template<typename T, int N> void zero_padding3(T* dst, const T* src, ldp::Int3 srcRes)
{
ldp::Int3 dstRes = srcRes + N * 2;
for (int z = 0; z < dstRes[2]; z++)
{
T* dst_z = dst + dstRes[0] * dstRes[1] * z;
const T* src_z = src + srcRes[0] * srcRes[1] * (z - N);
if (z < N || z >= srcRes[2] + N)
{
memset(dst_z, 0, dstRes[0] * dstRes[1] * sizeof(T));
continue;
}
for (int y = 0; y < dstRes[1]; y++)
{
T* dst_y = dst_z + dstRes[0] * y;
const T* src_y = src_z + srcRes[0] * (y - N);
if (y < N || y >= srcRes[1] + N)
{
memset(dst_y, 0, dstRes[0] * sizeof(T));
continue;
}
memset(dst_y, 0, N * sizeof(T));
memset(dst_y + srcRes[0] + N, 0, N * sizeof(T));
for (int x = N; x < dstRes[0] - N; x++)
dst_y[x] = src_y[x - N];
}// y
}// z
}
template<typename T, int N> void zero_padding2(T* dst, const T* src, ldp::Int2 srcRes)
{
ldp::Int2 dstRes = srcRes + N * 2;
for (int y = 0; y < dstRes[1]; y++)
{
T* dst_y = dst + dstRes[0] * y;
const T* src_y = src + srcRes[0] * (y - N);
if (y < N || y >= srcRes[1] + N)
{
memset(dst_y, 0, dstRes[0] * sizeof(T));
continue;
}
memset(dst_y, 0, N * sizeof(T));
memset(dst_y + srcRes[0] + N, 0, N * sizeof(T));
for (int x = N; x < dstRes[0] - N; x++)
dst_y[x] = src_y[x - N];
}// y
}
	// 1D sliding-window max filter of width N.
	// Reads @num contiguous elements from @src and writes @num results to @dst,
	// advancing @dst by @dstStride elements per output — this lets callers
	// filter along y/z of a volume by passing the row/slice stride.
	// For even N the window is right-biased: L = N/2-1 taps left, R = N/2 right.
	template<typename T, int N> void max_filter(T* dst, const T* src,
		int num, int dstStride)
	{
		const static int L = N / 2 - (N % 2 == 0);
		const static int R = N / 2;
		const int head_pos = std::min((int)num, R);
		const int tail_pos = num - R;
		const int tail_head_pos = std::max(head_pos, tail_pos);
		// the first few elements that does not fullfill the conv kernel
		for (int x = 0; x < head_pos; x++)
		{
			const int xb = std::max(-L, -x);         // clamp window to [0, num)
			const int xe = std::min(num - x - 1, R);
			T v = std::numeric_limits<T>::lowest();  // identity for max
			for (int k = xb; k <= xe; k++)
				if (src[k + x] > v)
					v = src[k + x];
			*dst = v;
			dst += dstStride;
		}
		// middle elements that fullfills the conv kernel
		for (int x = R; x < tail_pos; x++)
		{
			T v = std::numeric_limits<T>::lowest();
			for (int k = -L; k <= R; k++)
				if (src[k + x] > v)
					v = src[k + x];
			*dst = v;
			dst += dstStride;
		}// end for x
		// the last few elements that does not fullfill the conv kernel
		for (int x = tail_head_pos; x < num; x++)
		{
			const int xb = std::max(-L, -x);
			const int xe = std::min(num - x - 1, R);
			T v = std::numeric_limits<T>::lowest();
			for (int k = xb; k <= xe; k++)
				if (src[k + x] > v)
					v = src[k + x];
			*dst = v;
			dst += dstStride;
		}
	}
	// 1D sliding-window min filter of width N; mirror of max_filter above
	// (identity element is max() and the comparison is inverted).
	// Reads @num contiguous elements from @src, writes @num results to @dst
	// with a step of @dstStride elements per output.
	template<typename T, int N> void min_filter(T* dst, const T* src,
		int num, int dstStride)
	{
		const static int L = N / 2 - (N % 2 == 0);
		const static int R = N / 2;
		const int head_pos = std::min((int)num, R);
		const int tail_pos = num - R;
		const int tail_head_pos = std::max(head_pos, tail_pos);
		// the first few elements that does not fullfill the conv kernel
		for (int x = 0; x < head_pos; x++)
		{
			const int xb = std::max(-L, -x);      // clamp window to [0, num)
			const int xe = std::min(num - x - 1, R);
			T v = std::numeric_limits<T>::max();  // identity for min
			for (int k = xb; k <= xe; k++)
				if (src[k + x] < v)
					v = src[k + x];
			*dst = v;
			dst += dstStride;
		}
		// middle elements that fullfills the conv kernel
		for (int x = R; x < tail_pos; x++)
		{
			T v = std::numeric_limits<T>::max();
			for (int k = -L; k <= R; k++)
				if (src[k + x] < v)
					v = src[k + x];
			*dst = v;
			dst += dstStride;
		}// end for x
		// the last few elements that does not fullfill the conv kernel
		for (int x = tail_head_pos; x < num; x++)
		{
			const int xb = std::max(-L, -x);
			const int xe = std::min(num - x - 1, R);
			T v = std::numeric_limits<T>::max();
			for (int k = xb; k <= xe; k++)
				if (src[k + x] < v)
					v = src[k + x];
			*dst = v;
			dst += dstStride;
		}
	}
#ifdef CONV_HELPER_ENABLE_SSE
	// SSE variant of max_filter: processes 4 independent float lanes at once.
	// @src is treated as @num groups of 4 floats (group i at src + i*4), so
	// each lane is filtered separately — used to filter 4 adjacent columns of
	// a volume in one pass. Unaligned loads/stores are used throughout.
	// @dst advances by @dstStride floats per output group.
	template<int N> void max_filter_sse(float* dst, const float* src,
		int num, int dstStride)
	{
		const static int L = N / 2 - (N % 2 == 0);
		const static int R = N / 2;
		const int head_pos = std::min((int)num, R);
		const int tail_pos = num - R;
		const int tail_head_pos = std::max(head_pos, tail_pos);
		__m128 s;
		// the first few elements that does not fullfill the conv kernel
		for (int x = 0; x < head_pos; x++)
		{
			const int xb = std::max(-L, -x);  // clamp window to [0, num)
			const int xe = std::min(num - x - 1, R);
			__m128 v = _mm_set_ps1(std::numeric_limits<float>::lowest());
			for (int k = xb; k <= xe; k++)
			{
				s = _mm_loadu_ps(src + (k + x) * 4);
				v = _mm_max_ps(v, s);
			}
			_mm_storeu_ps(dst, v);
			dst += dstStride;
		}
		// middle elements that fullfills the conv kernel
		for (int x = R; x < tail_pos; x++)
		{
			__m128 v = _mm_set_ps1(std::numeric_limits<float>::lowest());
			for (int k = -L; k <= R; k++)
			{
				s = _mm_loadu_ps(src + (k + x) * 4);
				v = _mm_max_ps(v, s);
			}
			_mm_storeu_ps(dst, v);
			dst += dstStride;
		}// end for x
		// the last few elements that does not fullfill the conv kernel
		for (int x = tail_head_pos; x < num; x++)
		{
			const int xb = std::max(-L, -x);
			const int xe = std::min(num - x - 1, R);
			__m128 v = _mm_set_ps1(std::numeric_limits<float>::lowest());
			for (int k = xb; k <= xe; k++)
			{
				s = _mm_loadu_ps(src + (k + x) * 4);
				v = _mm_max_ps(v, s);
			}
			_mm_storeu_ps(dst, v);
			dst += dstStride;
		}
	}
	// SSE variant of min_filter: mirror of max_filter_sse above with
	// _mm_min_ps and identity element max(). Processes 4 independent float
	// lanes per group (group i at src + i*4); @dst advances by @dstStride
	// floats per output group. Unaligned loads/stores are used throughout.
	template<int N> void min_filter_sse(float* dst, const float* src,
		int num, int dstStride)
	{
		const static int L = N / 2 - (N % 2 == 0);
		const static int R = N / 2;
		const int head_pos = std::min((int)num, R);
		const int tail_pos = num - R;
		const int tail_head_pos = std::max(head_pos, tail_pos);
		__m128 s;
		// the first few elements that does not fullfill the conv kernel
		for (int x = 0; x < head_pos; x++)
		{
			const int xb = std::max(-L, -x);  // clamp window to [0, num)
			const int xe = std::min(num - x - 1, R);
			__m128 v = _mm_set_ps1(std::numeric_limits<float>::max());
			for (int k = xb; k <= xe; k++)
			{
				s = _mm_loadu_ps(src + (k + x) * 4);
				v = _mm_min_ps(v, s);
			}
			_mm_storeu_ps(dst, v);
			dst += dstStride;
		}
		// middle elements that fullfills the conv kernel
		for (int x = R; x < tail_pos; x++)
		{
			__m128 v = _mm_set_ps1(std::numeric_limits<float>::max());
			for (int k = -L; k <= R; k++)
			{
				s = _mm_loadu_ps(src + (k + x) * 4);
				v = _mm_min_ps(v, s);
			}
			_mm_storeu_ps(dst, v);
			dst += dstStride;
		}// end for x
		// the last few elements that does not fullfill the conv kernel
		for (int x = tail_head_pos; x < num; x++)
		{
			const int xb = std::max(-L, -x);
			const int xe = std::min(num - x - 1, R);
			__m128 v = _mm_set_ps1(std::numeric_limits<float>::max());
			for (int k = xb; k <= xe; k++)
			{
				s = _mm_loadu_ps(src + (k + x) * 4);
				v = _mm_min_ps(v, s);
			}
			_mm_storeu_ps(dst, v);
			dst += dstStride;
		}
	}
#endif
// 3D max filter
// @dim:
// 0, filter x;
// 1, filter y;
// 2, filter z;
// -1[default], filter all directions
// In-place separable 3D max filter of window size N over an X-Y-Z ordered
// volume. Each requested axis is filtered independently; every row/column
// is copied into a per-thread scratch line first so the in-place filter
// along that axis reads unmodified data.
template<typename T, int N, int numThreads = 4> void max_filter3(
T* srcDst, ldp::Int3 res, int dim = -1)
{
T* dstPtr_000 = srcDst;
// linear strides of the X-Y-Z ordered volume
const int y_stride = res[0];;
const int z_stride = res[0] * res[1];
if (dim < -1 || dim > 2)
throw std::exception("illegal input parameter @dim"); // NOTE(review): std::exception(const char*) is an MSVC extension
// allocate buffer for thread data
std::vector<T> tmpBuffers[numThreads];
#ifdef CONV_HELPER_ENABLE_SSE
std::vector<__m128> tmpBuffers_sse[numThreads];
#endif
for (int k = 0; k < numThreads; k++)
{
// each scratch line must be able to hold the longest axis
tmpBuffers[k].resize(std::max(res[0], std::max(res[1], res[2])));
#ifdef CONV_HELPER_ENABLE_SSE
if (ldp::is_float<T>::value)
tmpBuffers_sse[k].resize(std::max(res[0], std::max(res[1], res[2])));
#endif
}
if (dim == 0 || dim == -1)
{
// max filtering along x direction
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
std::vector<T>& tmpBuffer = tmpBuffers[omp_get_thread_num()];
T* dstPtr_00z = dstPtr_000 + z * z_stride;
for (int y = 0; y < res[1]; y++)
{
T* dstPtr = dstPtr_00z + y * y_stride;
// copy the row out, then filter it back into place
for (int x = 0; x < res[0]; x++)
tmpBuffer[x] = dstPtr[x];
max_filter<T, N>(dstPtr, tmpBuffer.data(), res[0], 1);
}// end for y
}// end for z
}// end if dim == 0
if (dim == 1 || dim == -1)
{
// max filtering along y direction
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
std::vector<T>& tmpBuffer = tmpBuffers[omp_get_thread_num()];
T* dstPtr_00z = dstPtr_000 + z * z_stride;
#ifdef CONV_HELPER_ENABLE_SSE
std::vector<__m128>& tmpBuffer_sse =
tmpBuffers_sse[omp_get_thread_num()];
#endif
int x = 0;
#ifdef CONV_HELPER_ENABLE_SSE
// SSE path: process 4 adjacent x-columns at once (float data only)
if (ldp::is_float<T>::value)
{
for (; x < res[0] - 3; x += 4)
{
T* dstPtr = dstPtr_00z + x;
for (int y = 0, y1 = 0; y < res[1]; y++, y1 += y_stride)
tmpBuffer_sse[y] = _mm_loadu_ps((float*)dstPtr + y1);
max_filter_sse<N>((float*)dstPtr, (const float*)tmpBuffer_sse.data(), res[1], y_stride);
}
}
#endif
// scalar fall-back / tail for remaining columns
for (; x < res[0]; x++)
{
T* dstPtr = dstPtr_00z + x;
for (int y = 0, y1 = 0; y < res[1]; y++, y1 += y_stride)
tmpBuffer[y] = dstPtr[y1];
max_filter<T, N>(dstPtr, tmpBuffer.data(), res[1], y_stride);
}// end for x
}// end for z
}// end if dim == 1
// z pass is skipped for single-slice (2D) volumes
if ((dim == 2 || dim == -1)&&res[2]>1)
{
// max filtering along z direction
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int y = 0; y < res[1]; y++)
{
std::vector<T>& tmpBuffer = tmpBuffers[omp_get_thread_num()];
T* dstPtr_00y = dstPtr_000 + y * y_stride;
#ifdef CONV_HELPER_ENABLE_SSE
std::vector<__m128>& tmpBuffer_sse =
tmpBuffers_sse[omp_get_thread_num()];
#endif
int x = 0;
#ifdef CONV_HELPER_ENABLE_SSE
// SSE path: process 4 adjacent x-columns at once (float data only)
if (ldp::is_float<T>::value)
{
for (; x < res[0] - 3; x += 4)
{
T* dstPtr = dstPtr_00y + x;
for (int z = 0, z1 = 0; z < res[2]; z++, z1 += z_stride)
tmpBuffer_sse[z] = _mm_loadu_ps((float*)&dstPtr[z1]);
max_filter_sse<N>((float*)dstPtr, (const float*)tmpBuffer_sse.data(), res[2], z_stride);
}
}
#endif
for (; x < res[0]; x++)
{
T* dstPtr = dstPtr_00y + x;
for (int z = 0, z1 = 0; z < res[2]; z++, z1 += z_stride)
tmpBuffer[z] = dstPtr[z1];
max_filter<T, N>(dstPtr, tmpBuffer.data(), res[2], z_stride);
}// end for x
}// end for y
}// end if dim == 2
}
// In-place separable 3D min filter of window size N over an X-Y-Z ordered
// volume; structurally identical to max_filter3 but delegates to the
// min_filter / min_filter_sse line kernels.
template<typename T, int N, int numThreads = 4> void min_filter3(
T* srcDst, ldp::Int3 res, int dim = -1)
{
T* dstPtr_000 = srcDst;
// linear strides of the X-Y-Z ordered volume
const int y_stride = res[0];;
const int z_stride = res[0] * res[1];
if (dim < -1 || dim > 2)
throw std::exception("illegal input parameter @dim"); // NOTE(review): std::exception(const char*) is an MSVC extension
// allocate buffer for thread data
std::vector<T> tmpBuffers[numThreads];
#ifdef CONV_HELPER_ENABLE_SSE
std::vector<__m128> tmpBuffers_sse[numThreads];
#endif
for (int k = 0; k < numThreads; k++)
{
// each scratch line must be able to hold the longest axis
tmpBuffers[k].resize(std::max(res[0], std::max(res[1], res[2])));
#ifdef CONV_HELPER_ENABLE_SSE
if (ldp::is_float<T>::value)
tmpBuffers_sse[k].resize(std::max(res[0], std::max(res[1], res[2])));
#endif
}
if (dim == 0 || dim == -1)
{
// min filtering along x direction
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
std::vector<T>& tmpBuffer = tmpBuffers[omp_get_thread_num()];
T* dstPtr_00z = dstPtr_000 + z * z_stride;
for (int y = 0; y < res[1]; y++)
{
T* dstPtr = dstPtr_00z + y * y_stride;
// copy the row out, then filter it back into place
for (int x = 0; x < res[0]; x++)
tmpBuffer[x] = dstPtr[x];
min_filter<T, N>(dstPtr, tmpBuffer.data(), res[0], 1);
}// end for y
}// end for z
}// end if dim == 0
if (dim == 1 || dim == -1)
{
// min filtering along y direction
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
std::vector<T>& tmpBuffer = tmpBuffers[omp_get_thread_num()];
T* dstPtr_00z = dstPtr_000 + z * z_stride;
#ifdef CONV_HELPER_ENABLE_SSE
std::vector<__m128>& tmpBuffer_sse =
tmpBuffers_sse[omp_get_thread_num()];
#endif
int x = 0;
#ifdef CONV_HELPER_ENABLE_SSE
// SSE path: process 4 adjacent x-columns at once (float data only)
if (ldp::is_float<T>::value)
{
for (; x < res[0] - 3; x += 4)
{
T* dstPtr = dstPtr_00z + x;
for (int y = 0, y1 = 0; y < res[1]; y++, y1 += y_stride)
tmpBuffer_sse[y] = _mm_loadu_ps((float*)dstPtr + y1);
min_filter_sse<N>((float*)dstPtr, (const float*)tmpBuffer_sse.data(), res[1], y_stride);
}
}
#endif
// scalar fall-back / tail for remaining columns
for (; x < res[0]; x++)
{
T* dstPtr = dstPtr_00z + x;
for (int y = 0, y1 = 0; y < res[1]; y++, y1 += y_stride)
tmpBuffer[y] = dstPtr[y1];
min_filter<T, N>(dstPtr, tmpBuffer.data(), res[1], y_stride);
}// end for x
}// end for z
}// end if dim == 1
// z pass is skipped for single-slice (2D) volumes
if ((dim == 2 || dim == -1) && res[2]>1)
{
// min filtering along z direction
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int y = 0; y < res[1]; y++)
{
std::vector<T>& tmpBuffer = tmpBuffers[omp_get_thread_num()];
T* dstPtr_00y = dstPtr_000 + y * y_stride;
#ifdef CONV_HELPER_ENABLE_SSE
std::vector<__m128>& tmpBuffer_sse =
tmpBuffers_sse[omp_get_thread_num()];
#endif
int x = 0;
#ifdef CONV_HELPER_ENABLE_SSE
// SSE path: process 4 adjacent x-columns at once (float data only)
if (ldp::is_float<T>::value)
{
for (; x < res[0] - 3; x += 4)
{
T* dstPtr = dstPtr_00y + x;
for (int z = 0, z1 = 0; z < res[2]; z++, z1 += z_stride)
tmpBuffer_sse[z] = _mm_loadu_ps((float*)&dstPtr[z1]);
min_filter_sse<N>((float*)dstPtr, (const float*)tmpBuffer_sse.data(), res[2], z_stride);
}
}
#endif
for (; x < res[0]; x++)
{
T* dstPtr = dstPtr_00y + x;
for (int z = 0, z1 = 0; z < res[2]; z++, z1 += z_stride)
tmpBuffer[z] = dstPtr[z1];
min_filter<T, N>(dstPtr, tmpBuffer.data(), res[2], z_stride);
}// end for x
}// end for y
}// end if dim == 2
}
template<typename T, int N, int numThreads = 4> void max_filter2(
T* srcDst, ldp::Int2 res, int dim = -1)
{
max_filter3<T, N, numThreads>(srcDst, ldp::Int3(res[0], res[1], 1), dim);
}
template<typename T, int N, int numThreads = 4> void min_filter2(
T* srcDst, ldp::Int2 res, int dim = -1)
{
min_filter3<T, N, numThreads>(srcDst, ldp::Int3(res[0], res[1], 1), dim);
}
// 1D conv, the same with matlab conv(..., 'same')
// assume:
// the stride of src is 1
// the size of src @num
// the size of dst @num
// kernel size is @N
// 1D convolution with 'same' output size (matches matlab conv(...,'same')).
// Reads @num contiguous samples from @src (stride 1) and writes @num
// results, advancing @dst by @dstStride after each output. N is the
// compile-time kernel length; taps falling outside the signal near the
// borders are dropped (zero-padding semantics).
template<typename T, int N> void conv(T* dst, const T* src, const T* kernel,
	int num, int dstStride)
{
	// kernel reach: L taps left of the center sample, R taps to the right
	const int L = N / 2 - (N % 2 == 0);
	const int R = N / 2;
	for (int x = 0; x < num; x++)
	{
		// clamp the tap range to the valid part of the signal; in the
		// interior this is simply [-L, R]
		const int kBegin = std::max(-L, -x);
		const int kEnd = std::min(num - x - 1, R);
		T acc = 0;
		for (int k = kBegin; k <= kEnd; k++)
			acc += src[x + k] * kernel[R - k];
		*dst = acc;
		dst += dstStride;
	}
}
#ifdef CONV_HELPER_ENABLE_SSE
// SSE variant of conv: convolves 4 interleaved lanes at once. Each logical
// sample occupies 4 consecutive floats in @src (note the (k + x) * 4 load
// stride), so this services 4 adjacent columns of a 2D/3D pass in one go.
// @dst advances by @dstStride floats per output sample.
template<int N> void conv_sse(float* dst, const float* src, const float* kernel,
int num, int dstStride)
{
// kernel reach: L taps left of the center sample, R taps to the right
const static int L = N / 2 - (N % 2 == 0);
const static int R = N / 2;
const int head_pos = std::min((int)num, R);
const int tail_pos = num - R;
const int tail_head_pos = std::max(head_pos, tail_pos);
__m128 s, knl;
// the first few elements that do not fullfill the conv kernel
for (int x = 0; x < head_pos; x++)
{
// clamp the tap range to the valid part of the signal
const int xb = std::max(-L, -x);
const int xe = std::min(num - x - 1, R);
__m128 v = _mm_setzero_ps();
for (int k = xb; k <= xe; k++)
{
s = _mm_loadu_ps(src + (k + x) * 4);
knl = _mm_set_ps1(kernel[R - k]); // broadcast scalar tap to all 4 lanes
v = _mm_add_ps(v, _mm_mul_ps(s, knl));
}
_mm_storeu_ps(dst, v);
dst += dstStride;
}
// middle elements that fullfill the conv kernel
for (int x = R; x < tail_pos; x++)
{
__m128 v = _mm_setzero_ps();
for (int k = -L; k <= R; k++)
{
s = _mm_loadu_ps(src + (k + x) * 4);
knl = _mm_set_ps1(kernel[R - k]);
v = _mm_add_ps(v, _mm_mul_ps(s, knl));
}
_mm_storeu_ps(dst, v);
dst += dstStride;
}// end for x
// the last few elements that do not fullfill the conv kernel
for (int x = tail_head_pos; x < num; x++)
{
const int xb = std::max(-L, -x);
const int xe = std::min(num - x - 1, R);
__m128 v = _mm_setzero_ps();
for (int k = xb; k <= xe; k++)
{
s = _mm_loadu_ps(src + (k + x) * 4);
knl = _mm_set_ps1(kernel[R - k]);
v = _mm_add_ps(v, _mm_mul_ps(s, knl));
}
_mm_storeu_ps(dst, v);
dst += dstStride;
}
}
#endif
// 3D seperate-kernel convolution with 'same' output
// this method is the SAME as calling matlab convn(...,'same') along x-y-z 3 dims.
// @data:
// X-Y-Z ordered 3D data
// @kernel[N]
// @res: resolution of the input 3D data
// @dim:
// 0, conv x;
// 1, conv y;
// 2, conv z;
// -1[default], conv all directions
// In-place separable 3D convolution ('same' boundary handling) over an
// X-Y-Z ordered volume. Each requested axis is convolved independently
// with the 1D kernel of compile-time length N; every row/column is copied
// into a per-thread scratch line first so the in-place conv along that
// axis reads unmodified data.
template<typename T, int N, int numThreads = 4> void conv3(T* srcDst,
const T* kernel, ldp::Int3 res, int dim = -1)
{
T* dstPtr_000 = srcDst;
// linear strides of the X-Y-Z ordered volume
const int y_stride = res[0];;
const int z_stride = res[0]*res[1];
if (dim < -1 || dim > 2)
throw std::exception("illegal input parameter @dim"); // NOTE(review): std::exception(const char*) is an MSVC extension
// allocate buffer for thread data
std::vector<T> tmpBuffers[numThreads];
#ifdef CONV_HELPER_ENABLE_SSE
std::vector<__m128> tmpBuffers_sse[numThreads];
#endif
for (int k = 0; k < numThreads; k++)
{
// each scratch line must be able to hold the longest axis
tmpBuffers[k].resize(std::max(res[0], std::max(res[1], res[2])));
#ifdef CONV_HELPER_ENABLE_SSE
if (ldp::is_float<T>::value)
tmpBuffers_sse[k].resize(std::max(res[0], std::max(res[1], res[2])));
#endif
}
if (dim == 0 || dim == -1)
{
// conv along x direction
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
std::vector<T>& tmpBuffer = tmpBuffers[omp_get_thread_num()];
T* dstPtr_00z = dstPtr_000 + z * z_stride;
for (int y = 0; y < res[1]; y++)
{
T* dstPtr = dstPtr_00z + y * y_stride;
// copy the row out, then convolve it back into place
for (int x = 0; x < res[0]; x++)
tmpBuffer[x] = dstPtr[x];
conv<T, N>(dstPtr, tmpBuffer.data(), kernel, res[0], 1);
}// end for y
}// end for z
}// end if dim == 0
if (dim == 1 || dim == -1)
{
// conv along y direction
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
std::vector<T>& tmpBuffer = tmpBuffers[omp_get_thread_num()];
#ifdef CONV_HELPER_ENABLE_SSE
std::vector<__m128>& tmpBuffer_sse =
tmpBuffers_sse[omp_get_thread_num()];
#endif
T* dstPtr_00z = dstPtr_000 + z * z_stride;
int x = 0;
#ifdef CONV_HELPER_ENABLE_SSE
// SSE path: process 4 adjacent x-columns at once (float data only)
if (ldp::is_float<T>::value)
{
for (; x < res[0] - 3; x += 4)
{
T* dstPtr = dstPtr_00z + x;
for (int y = 0, y1 = 0; y < res[1]; y++, y1 += y_stride)
tmpBuffer_sse[y] = _mm_loadu_ps((float*)dstPtr + y1);
conv_sse<N>(dstPtr, (const float*)tmpBuffer_sse.data(), kernel, res[1], y_stride);
}
}
#endif
// scalar fall-back / tail for remaining columns
for (; x < res[0]; x++)
{
T* dstPtr = dstPtr_00z + x;
for (int y = 0, y1 = 0; y < res[1]; y++, y1 += y_stride)
tmpBuffer[y] = dstPtr[y1];
conv<T, N>(dstPtr, tmpBuffer.data(), kernel, res[1], y_stride);
}// end for x
}// end for z
}// end if dim == 1
// z pass is skipped for single-slice (2D) volumes
if ((dim == 2 || dim == -1) && res[2]>1)
{
// conv along z direction
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int y = 0; y < res[1]; y++)
{
std::vector<T>& tmpBuffer = tmpBuffers[omp_get_thread_num()];
#ifdef CONV_HELPER_ENABLE_SSE
std::vector<__m128>& tmpBuffer_sse =
tmpBuffers_sse[omp_get_thread_num()];
#endif
T* dstPtr_00y = dstPtr_000 + y * y_stride;
int x = 0;
#ifdef CONV_HELPER_ENABLE_SSE
// SSE path: process 4 adjacent x-columns at once (float data only)
if (ldp::is_float<T>::value)
{
for (; x < res[0] - 3; x += 4)
{
T* dstPtr = dstPtr_00y + x;
for (int z = 0, z1 = 0; z < res[2]; z++, z1 += z_stride)
tmpBuffer_sse[z] = _mm_loadu_ps((float*)&dstPtr[z1]);
conv_sse<N>((float*)dstPtr, (const float*)tmpBuffer_sse.data(), kernel, res[2], z_stride);
}
}
#endif
for (; x < res[0]; x++)
{
T* dstPtr = dstPtr_00y + x;
for (int z = 0, z1 = 0; z < res[2]; z++, z1 += z_stride)
tmpBuffer[z] = dstPtr[z1];
conv<T, N>(dstPtr, tmpBuffer.data(), kernel, res[2], z_stride);
}// end for x
}// end for y
}// end if dim == 2
}
template<typename T, int N, int numThreads = 4> void conv2(T* srcDst,
const T* kernel, ldp::Int2 res, int dim = -1)
{
conv3<T, N, numThreads>(srcDst, kernel, ldp::Int3(res[0], res[1], 1), dim);
}
// 3D box filter
// it is done by integral images
// temporary memroy will be allocated inside
// 3D box filter of window size @boxSize over an X-Y-Z ordered volume.
// Implemented with running-sum (integral) tables per axis followed by
// clamped differences, so the cost is independent of the box size.
// @dst receives the (unnormalized) box sums; @src is left untouched.
// Temporary memory for the integral image is allocated inside.
template<typename T, int numThreads = 4> void boxFilter(T* dst, const T*src, int boxSize, ldp::Int3 res)
{
if (dst == src)
throw std::exception("boxFilter(): src and dst cannot be the same memory!");
// integral table padded by one zeroed plane/row/column in each axis
std::vector<T> intImg((1+res[0]) * (1+res[1]) * (1+res[2]), 0);
// BUG FIX: L and R were 'const static', so they were initialized only on
// the first call (per template instantiation) and stale half-widths were
// silently reused when a later call passed a different boxSize. They
// depend on a runtime argument and must be plain locals.
const int L = boxSize / 2 - (boxSize % 2 == 0);
const int R = boxSize / 2;
const int stride_z_intg = (res[0] + 1)*(res[1] + 1);
const int stride_z_srcDst = res[0] * res[1];
// pass 1: integral along z
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int y = 0; y < res[1]; y++)
{
const T* src_y = src + y*res[0];
T* intg_y = intImg.data() + (y + 1)*(res[0] + 1);
for (int z = 0; z < res[2]; z++)
{
const T* src_z = src_y + z*stride_z_srcDst;
T* intg_z = intg_y + (z + 1)*stride_z_intg;
const T* intg_z_prev = intg_z - stride_z_intg;
for (int x = 0; x < res[0]; x++)
intg_z[x + 1] = intg_z_prev[x + 1] + src_z[x];
}// z
}// y
// pass 2: diff along z (windowed sums land in dst)
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int y = 0; y < res[1]; y++)
{
T* dst_y = dst + y*res[0];
const T* intg_y = intImg.data() + (y + 1)*(res[0] + 1);
for (int z = 0; z < res[2]; z++)
{
T* dst_z = dst_y + z*stride_z_srcDst;
// clamp the window to the volume borders
const T* intg_z_after = intg_y + std::min(z + R + 1, res[2])*stride_z_intg;
const T* intg_z_prev = intg_y + std::max(z - L, 0)*stride_z_intg;
for (int x = 0; x < res[0]; x++)
dst_z[x] = intg_z_after[x + 1] - intg_z_prev[x + 1];
}// z
}// y
// pass 3: integral along y (re-reads the partial result in dst)
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
const T* dst_z = dst + z*stride_z_srcDst;
T* intg_z = intImg.data() + (z + 1)*stride_z_intg;
for (int y = 0; y < res[1]; y++)
{
const T* dst_y = dst_z + y*res[0];
T* intg_y = intg_z + (y+1)*(res[0]+1);
const T* intg_y_prev = intg_y - (res[0]+1);
for (int x = 0; x < res[0]; x++)
intg_y[x+1] = intg_y_prev[x+1] + dst_y[x];
}// y
}// z
// pass 4: diff along y
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
T* dst_z = dst + z*stride_z_srcDst;
const T* intg_z = intImg.data() + (z + 1)*stride_z_intg;
for (int y = 0; y < res[1]; y++)
{
T* dst_y = dst_z + y*res[0];
const T* intg_y_after = intg_z + std::min(y + R + 1, res[1])*(res[0] + 1);
const T* intg_y_prev = intg_z + std::max(y - L, 0)*(res[0] + 1);
for (int x = 0; x < res[0]; x++)
dst_y[x] = intg_y_after[x + 1] - intg_y_prev[x + 1];
}// y
}// z
// pass 5: integral along x
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
const T* dst_z = dst + z*stride_z_srcDst;
T* intg_z = intImg.data() + (z + 1)*stride_z_intg;
for (int y = 0; y < res[1]; y++)
{
const T* dst_y = dst_z + y*res[0];
T* intg_y = intg_z + (y + 1)*(res[0] + 1);
for (int x = 0; x < res[0]; x++)
intg_y[x+1] = intg_y[x] + dst_y[x];
}// y
}// z
// pass 6: diff along x; pos[] splits the row into clamped border
// segments and an unclamped interior segment
const int pos[] = { std::min(L, res[0]), std::max(0, res[0] - R - 1) };
#pragma omp parallel for num_threads(numThreads) if(CONV_HELPER_ENABLE_OMP)
for (int z = 0; z < res[2]; z++)
{
T* dst_z = dst + z*stride_z_srcDst;
const T* intg_z = intImg.data() + (z + 1)*stride_z_intg;
for (int y = 0; y < res[1]; y++)
{
T* dst_y = dst_z + y*res[0];
const T* intg_y = intg_z + (y + 1)*(res[0] + 1);
for (int x = 0; x < pos[0]; x++)
{
int prev = std::max(x - L, 0);
int after = std::min(x + R + 1, res[0]);
dst_y[x] = intg_y[after] - intg_y[prev];
}
// interior: no clamping needed
for (int x = pos[0]; x < pos[1]; x++)
dst_y[x] = intg_y[x + R + 1] - intg_y[x - L];
for (int x = pos[1]; x < res[0]; x++)
{
int prev = std::max(x - L, 0);
int after = std::min(x + R + 1, res[0]);
dst_y[x] = intg_y[after] - intg_y[prev];
}
}// y
}// z
}
}
#pragma pop_macro("max")
#pragma pop_macro("min") |
dlacpy.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlacpy.c, normal z -> d, Fri Sep 28 17:38:07 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup plasma_lacpy
*
* Copies general rectangular or upper or lower triangular part of
* a two-dimensional m-by-n matrix A to another m-by-n matrix B.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the part of the matrix A to be copied to B.
* - PlasmaGeneral: General rectangular matrix A
* - PlasmaUpper: Upper triangular part of A
* - PlasmaLower: Lower triangular part of A
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] pA
* The m-by-n matrix A. If uplo = PlasmaUpper, only the upper trapezium
* is accessed; if uplo = PlasmaLower, only the lower trapezium is
* accessed.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] pB
* The m-by-n matrix B.
* On exit, B = A in the locations specified by uplo.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dlacpy
* @sa plasma_clacpy
* @sa plasma_dlacpy
* @sa plasma_slacpy
*
******************************************************************************/
int plasma_dlacpy(plasma_enum_t uplo, plasma_enum_t transa,
int m, int n,
double *pA, int lda,
double *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments. Negative return values index the offending
// argument, LAPACK-style.
if ((uplo != PlasmaGeneral) &&
(uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if ((transa != PlasmaNoTrans) &&
(transa != PlasmaTrans) &&
(transa != PlasmaConjTrans)) {
plasma_error("illegal value of transa");
return -2;
}
if (m < 0) {
plasma_error("illegal value of m");
return -3;
}
if (n < 0) {
plasma_error("illegal value of n");
return -4;
}
// a transposed copy is only supported for square matrices here
if (transa != PlasmaNoTrans && m != n) {
plasma_error("illegal value of m and n");
return -3;
}
if (lda < imax(1, m)) {
plasma_error("illegal value of lda");
return -6;
}
// B has n rows when A is (conjugate-)transposed
if (ldb < imax(1, (transa == PlasmaNoTrans ? m : n))) {
plasma_error("illegal value of ldb");
return -8;
}
// quick return
if (imin(n, m) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_lacpy(plasma, PlasmaRealDouble, m, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A, B;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
m, n, 0, 0, m, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_general_desc_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
m, n, 0, 0, m, n, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_general_desc_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block: tasks are created by the master thread and
// executed by the parallel team; errors are reported via the sequence
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
// Call tile async function.
plasma_omp_dlacpy(uplo, transa, A, B, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
*
* @ingroup plasma_lacpy
*
* Copies general rectangular or upper or lower triangular part of
* a two-dimensional m-by-n matrix A to another m-by-n matrix B. Non-blocking
* tile version of plasma_dlacpy(). May return before the computation is
* finished. Operates on matrices stored by tiles. All matrices are passed
* through descriptors. All dimensions are taken from the descriptors. Allows
* for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the part of the matrix A to be copied to B.
* - PlasmaGeneral: General rectangular matrix A
* - PlasmaUpper: Upper triangular part of A
* - PlasmaLower: Lower triangular part of A
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check the
* sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dlacpy
* @sa plasma_omp_clacpy
* @sa plasma_omp_dlacpy
* @sa plasma_omp_slacpy
*
******************************************************************************/
void plasma_omp_dlacpy(plasma_enum_t uplo, plasma_enum_t transa,
plasma_desc_t A, plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context. Errors in this async routine are reported by
// failing the sequence/request, not via a return value.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((uplo != PlasmaGeneral) &&
(uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((transa != PlasmaNoTrans) &&
(transa != PlasmaTrans) &&
(transa != PlasmaConjTrans)) {
plasma_error("illegal value of transa");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// NOTE(review): failing a NULL sequence/request passes the NULL pointer
// straight to plasma_request_fail — relies on that function tolerating it
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (imin(A.m, A.n) == 0)
return;
// Call the parallel function.
plasma_pdlacpy(uplo, transa, A, B, sequence, request);
}
|
FastMultipoleMethod.h | /*
Copyright (c) 2005-2016, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Aboria.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FAST_MULTIPOLE_METHOD_H_
#define FAST_MULTIPOLE_METHOD_H_
#include "detail/FastMultipoleMethod.h"
#ifdef HAVE_EIGEN
#include "detail/Kernels.h"
#endif
namespace Aboria {
template <typename Expansions, typename Kernel, typename RowParticles,
typename ColParticles>
class FastMultipoleMethod {
protected:
typedef typename RowParticles::query_type row_query_type;
typedef typename ColParticles::query_type col_query_type;
typedef typename row_query_type::reference row_reference;
typedef typename row_query_type::pointer row_pointer;
typedef typename row_query_type::child_iterator row_child_iterator;
typedef typename row_query_type::particle_iterator row_particle_iterator;
typedef typename row_particle_iterator::reference row_particle_reference;
typedef typename col_query_type::reference col_reference;
typedef typename col_query_type::pointer col_pointer;
typedef typename col_query_type::child_iterator col_child_iterator;
typedef typename col_query_type::particle_iterator col_particle_iterator;
typedef typename col_particle_iterator::reference col_particle_reference;
typedef typename Expansions::l_expansion_type l_expansion_type;
typedef typename Expansions::m_expansion_type m_expansion_type;
typedef typename ColParticles::traits_type traits_type;
typedef typename traits_type::template vector_type<l_expansion_type>::type
l_storage_type;
typedef typename traits_type::template vector_type<m_expansion_type>::type
m_storage_type;
typedef typename traits_type::template vector_type<
col_child_iterator
// typename std::remove_const<child_iterator>::type
>::type col_child_iterator_vector_type;
typedef typename traits_type::template vector_type<
col_child_iterator_vector_type>::type connectivity_type;
typedef typename traits_type::double_d double_d;
typedef typename traits_type::position position;
static const unsigned int dimension = traits_type::dimension;
typedef bbox<dimension> box_type;
mutable m_storage_type m_W;
mutable l_storage_type m_g;
mutable connectivity_type m_connectivity;
const ColParticles *m_col_particles;
const RowParticles *m_row_particles;
const col_query_type *m_col_query;
const row_query_type *m_row_query;
Expansions m_expansions;
Kernel m_kernel;
const int m_num_tasks;
public:
// Caches pointers to both particle sets and their spatial queries, and
// copies the expansion scheme and kernel functor. m_num_tasks bounds the
// number of OpenMP tasks spawned during the tree sweeps (1 when built
// without OpenMP). The particle sets must outlive this object.
FastMultipoleMethod(const RowParticles &row_particles,
const ColParticles &col_particles,
const Expansions &expansions, const Kernel &kernel)
: m_col_particles(&col_particles), m_row_particles(&row_particles),
m_col_query(&col_particles.get_query()),
m_row_query(&row_particles.get_query()), m_expansions(expansions),
m_kernel(kernel),
#ifdef HAVE_OPENMP
m_num_tasks(omp_get_max_threads())
#else
m_num_tasks(1)
#endif
{
}
// target_vector += A*source_vector
// Computes target_vector += A * source_vector via the FMM: an upward
// sweep (P2M/M2M) over the column tree builds multipole expansions in
// m_W, then a downward sweep (M2L/L2L) over the row tree accumulates
// results into target_vector. One OpenMP task is spawned per root child
// in each sweep; the commented-out pragmas are earlier explicit
// data-sharing variants kept for reference.
template <typename VectorTypeTarget, typename VectorTypeSource>
void matrix_vector_multiply(VectorTypeTarget &target_vector,
const VectorTypeSource &source_vector) const {
// NOTE(review): this check enforces equal source/target lengths, i.e.
// a square operator
CHECK(target_vector.size() == source_vector.size(),
"source and target vector not same length")
// per-bucket multipole (m_W) and local (m_g) expansion storage
m_W.resize(m_col_query->number_of_buckets());
m_g.resize(m_row_query->number_of_buckets());
m_connectivity.resize(m_row_query->number_of_buckets());
// upward sweep of tree
//
/*
#pragma omp parallel default(none) \
shared(source_vector, target_vector, m_num_tasks, m_W, m_g, m_col_query, \
m_row_query, m_expansions, m_kernel, m_connectivity)
*/
#pragma omp parallel shared(source_vector, target_vector)
{
#pragma omp single
{
const int nchild_col = m_col_query->num_children();
for (col_child_iterator ci =
m_col_particles->get_query().get_children();
ci != false; ++ci) {
/*
#pragma omp task default(none) firstprivate(ci) \ shared(source_vector,
m_num_tasks, m_W, m_col_query, m_expansions)
*/
#pragma omp task default(shared) firstprivate(ci) shared(source_vector)
calculate_dive_P2M_and_M2M(ci, source_vector,
m_num_tasks - nchild_col);
}
#pragma omp taskwait
// downward sweep of tree.
//
const int nchild_row = m_row_query->num_children();
for (row_child_iterator ci = m_row_query->get_children(); ci != false;
++ci) {
/*
#pragma omp task default(none) firstprivate(ci) \
shared(target_vector, source_vector, m_num_tasks, m_W, m_g, m_col_query, \
m_row_query, m_expansions, m_kernel, m_connectivity)
*/
#pragma omp task default(shared) firstprivate(ci) \
shared(target_vector, source_vector)
{
// each root task starts with an empty parent local expansion
// and an unbounded box
col_child_iterator_vector_type dummy;
l_expansion_type g{};
calculate_dive_M2L_and_L2L(target_vector, dummy, g, box_type(), ci,
source_vector, m_num_tasks - nchild_row);
}
}
#pragma omp taskwait
}
}
}
private:
// Upward-sweep worker: fills the multipole expansion of bucket *ci in
// m_W and returns a reference to it. Leaf buckets get a direct P2M from
// their particles; interior buckets recurse into their children and
// translate each child's expansion upward (M2M). While num_tasks > 0 the
// per-child recursions are spawned as OpenMP tasks and child
// contributions are merged into W via atomic increments; otherwise the
// recursion runs serially.
template <typename VectorType>
m_expansion_type &calculate_dive_P2M_and_M2M(const col_child_iterator &ci,
const VectorType &source_vector,
const int num_tasks) const {
const size_t my_index = m_col_query->get_bucket_index(*ci);
const box_type &my_box = m_col_query->get_bounds(ci);
LOG(3, "calculate_dive_P2M_and_M2M with bucket " << my_box);
m_expansion_type &W = m_W[my_index];
typedef detail::VectorTraits<typename m_expansion_type::value_type>
vector_traits;
// reset this bucket's expansion before accumulating into it
std::fill(std::begin(W), std::end(W), vector_traits::Zero());
if (m_col_query->is_leaf_node(*ci)) { // leaf node
detail::calculate_P2M(W, my_box, m_col_query->get_bucket_particles(*ci),
source_vector, m_col_query->get_particles_begin(),
m_expansions);
} else {
if (num_tasks > 0) {
const int nchildren = m_col_query->num_children(ci);
for (col_child_iterator cj = m_col_query->get_children(ci); cj != false;
++cj) {
/*
#pragma omp task default(none) firstprivate(cj) \
shared(source_vector, W, my_box, m_W, m_col_query, m_expansions)
*/
#pragma omp task default(shared) firstprivate(cj) \
shared(source_vector, W, my_box)
{
m_expansion_type &child_W = calculate_dive_P2M_and_M2M(
cj, source_vector, num_tasks - nchildren);
// translate the child expansion into a task-local buffer,
// then merge atomically since sibling tasks share W
m_expansion_type localW{};
const box_type &child_box = m_col_query->get_bounds(cj);
m_expansions.M2M(localW, my_box, child_box, child_W);
for (size_t i = 0; i < localW.size(); ++i) {
detail::VectorTraits<typename m_expansion_type::value_type>::
AtomicIncrement(W[i], localW[i]);
}
}
}
#pragma omp taskwait
} else {
// serial fallback once the task budget is exhausted
for (col_child_iterator cj = m_col_query->get_children(ci); cj != false;
++cj) {
m_expansion_type &child_W =
calculate_dive_P2M_and_M2M(cj, source_vector, num_tasks);
const box_type &child_box = m_col_query->get_bounds(cj);
m_expansions.M2M(W, my_box, child_box, child_W);
}
}
}
return W;
}
/*
template <typename VectorType>
void calculate_dive_P2M_and_M2M(const tree_t &tree,
const VectorType &source_vector) const {
m_multipoles.resize(m_col_query.number_of_buckets());
for (auto level = tree.rbegin(); level != --tree.rend(); ++level) {
detail::for_each(level.begin(), level.end(), [](child_iterator ci) {
const auto parent_box = m_col_query->get_parent_bounds(ci);
const size_t parent_index = m_col_query->get_parent_index(ci);
auto &parent_multipole = m_multipoles[parent_index];
const auto box = m_col_query->get_bounds(ci);
LOG(3, "calculate_dive_P2M_and_M2M with bucket " << box);
const size_t index = m_col_query->get_bucket_index(*ci);
auto &multipole = m_multipoles[index];
if (m_col_query->is_leaf_node(*ci)) { // leaf node
detail::calculate_P2M(
multipole, box, m_col_query->get_bucket_particles(*ci),
source_vector, m_col_query->get_particles_begin(),
m_expansions);
}
m_expansions.M2M(parent_multipole, parent_box, box, multipole);
}
});
}
}
template <typename VectorTypeTarget, typename VectorTypeSource>
void calculate_dive_M2L_and_L2L(const tree_t &source_tree,
const tree_t &target_tree,
VectorTypeTarget &target_vector,
const VectorTypeSource &source_vector) const {
auto target_level = target_tree.begin();
auto source_level = source_tree.begin();
int2_vector_t current_level(1, vint4(0, 0, 0, 0));
int2_vector_t next_level;
for (; target_level != target_tree.end(); ++target_level, ++source_level) {
// execute L2L and L2P on target_tree
detail::for_each(
++target_level.begin(), target_level.end(), [](child_iterator ci) {
const auto parent_box = m_col_query->get_parent_bounds(ci);
const size_t parent_index = m_col_query->get_parent_index(ci);
auto &parent_multipole = m_multipoles[parent_index];
const auto box = m_col_query->get_bounds(ci);
LOG(3, "calculate_L2L with bucket " << box);
const size_t index = m_col_query->get_bucket_index(*ci);
auto &multipole = m_multipoles[index];
m_expansions.L2L(g, box, box_parent, g_parent);
if (m_col_query->is_leaf_node(*ci)) { // leaf node
detail::calculate_L2P(target_vector, local, box,
m_row_query->get_bucket_particles(*ci),
m_row_query->get_particles_begin(),
m_expansions);
}
});
// determine number of children ( P2P (leaf+leaf) M2L(node+node+theta) = 0
// children, 1 leaf = nc, 0 leaf = nc*nc)
num_children.resize(current_level.size());
auto count_children = [](const vint4 &ij) {
int num_children = 0;
const auto &ci = target_tree[ij[0]][ij[1]];
const auto &cj = source_tree[ij[2]][ij[3]];
const bool ci_is_leaf = m_row_query->is_leaf_node(*ci);
const bool cj_is_leaf = m_col_query->is_leaf_node(*cj);
if (ci_is_leaf && cj_is_leaf) {
return 0;
} else if (detail::theta_condition < dimension()) {
return 0;
} else if (ci_is_leaf) {
return m_col_query->number_of_children(cj);
} else if (cj_is_leaf) {
return m_row_query->number_of_children(ci);
} else {
return m_row_query->number_of_children(ci) *
m_col_query->number_of_children(cj);
}
};
detail::transform(current_level.begin(), current_level.end(),
num_children.begin(), count_children);
// partion pairs by number of children = 0
auto ppoint = detail::partition(current_level.begin(),
current_level.end(), num_children.begin(),
[](const int nc) { return nc > 0; });
// execute P2P and M2L ops (nc = 0)
detail::for_each(ppoint, current_level.end(), []());
// enumerate the number of children
detail::exclusive_scan(num_children.begin(),num_children.end());
// create next level
// count number of children
// create new level
next_level.resize(num_children);
detail::for_each(
traits_t::make_zip_iterator(
traits_t::make_tuple(current_level.begin(),
num_children.begin())), traits_t::make_zip_iterator(
traits_t::make_tuple(current_level.end(), num_children.end())),
next_level.end(), [](auto i) {
auto ij = i.template get<0>();
auto ci = target_level[ij[0]];
auto cj = source_level[ij[1]];
int next_index = i.template get<1>();
for (int ci_index = target_next_index[ij[0]]; ci != false;
++ci, ++ci_index) {
size_t target_box = m_row_query->get_bounds(ci);
detail::theta_condition<dimension> theta(target_box.bmin,
target_box.bmax);
for (int cj_index = source_next_index[ij[1]]; cj != false;
++cj, ++cj_index) {
size_t source_box = m_col_query->get_bounds(cj);
if (theta.check(source_box.bmin, source_box.bmax)) {
if (is_leaf(ci, cj)) {
// do P2P or M2L
m_expansions.M2L(g, target_box, source_box, m_W[source_index]);
} else {
_next_level[next_index++] = vint2(ci_index, cj_index);
}
}
}
return num_children;
});
// swap to current level
current_level.swap(next_level);
// expand parents
const box_type &source_box = m_row_query->get_bounds(ci);
LOG(3, "calculate_dive_M2L_and_L2L with bucket " << target_box);
size_t target_index = m_row_query->get_bucket_index(*ci);
l_expansion_type &g = m_g[target_index];
typedef detail::VectorTraits<typename l_expansion_type::value_type>
vector_traits;
std::fill(std::begin(g), std::end(g), vector_traits::Zero());
typename connectivity_type::reference connected_buckets =
m_connectivity[target_index];
connected_buckets.clear();
if (connected_buckets_parent.empty()) {
for (col_child_iterator cj = m_col_query->get_children(); cj != false;
++cj) {
const box_type &source_box = m_col_query->get_bounds(cj);
if (theta.check(source_box.bmin, source_box.bmax)) {
connected_buckets.push_back(cj);
} else {
size_t source_index = m_col_query->get_bucket_index(*cj);
m_expansions.M2L(g, target_box, source_box, m_W[source_index]);
}
}
} else {
// expansion from parent
m_expansions.L2L(g, target_box, box_parent, g_parent);
// expansions from weakly connected buckets on this level
// and store strongly connected buckets to connectivity list
for (const col_child_iterator &source : connected_buckets_parent) {
if (m_col_query->is_leaf_node(*source)) {
connected_buckets.push_back(source);
} else {
for (col_child_iterator cj = m_col_query->get_children(source);
cj != false; ++cj) {
const box_type &source_box = m_col_query->get_bounds(cj);
if (theta.check(source_box.bmin, source_box.bmax)) {
connected_buckets.push_back(cj);
} else {
size_t source_index = m_col_query->get_bucket_index(*cj);
m_expansions.M2L(g, target_box, source_box,
m_W[source_index]);
}
}
}
}
}
if (!m_row_query->is_leaf_node(*ci)) { // leaf node
for (row_child_iterator cj = m_row_query->get_children(ci); cj !=
false;
++cj) {
calculate_dive_M2L_and_L2L(target_vector, connected_buckets, g,
target_box, cj, source_vector);
}
} else if (target_vector.size() > 0) {
detail::calculate_L2P(target_vector, g, target_box,
m_row_query->get_bucket_particles(*ci),
m_row_query->get_particles_begin(),
m_expansions);
for (col_child_iterator &cj : connected_buckets) {
if (m_col_query->is_leaf_node(*cj)) {
LOG(3, "calculate_P2P: target = " << target_box << " source = "
<< m_col_query->get_bounds(cj));
detail::calculate_P2P(target_vector, source_vector,
m_row_query->get_bucket_particles(*ci),
m_col_query->get_bucket_particles(*cj),
m_row_query->get_particles_begin(),
m_col_query->get_particles_begin(),
m_kernel); } else { for (auto j = m_col_query->get_subtree(cj); j != false;
++j) { if (m_col_query->is_leaf_node(*j)) {
detail::calculate_P2P(target_vector, source_vector,
m_row_query->get_bucket_particles(*ci),
m_col_query->get_bucket_particles(*j),
m_row_query->get_particles_begin(),
m_col_query->get_particles_begin(),
m_kernel);
}
}
}
}
}
*/
// Downward ("dive") pass of the FMM for one target (row) bucket.
//
// Builds the local expansion `g` of the bucket referenced by `ci` from
// (a) the parent's local expansion via L2L and (b) M2L contributions of
// source buckets that pass the theta (well-separated) test; source buckets
// that fail the test are kept in `connected_buckets` for direct evaluation.
// Internal nodes recurse into their children (as OpenMP tasks while
// num_tasks > 0, serially otherwise); leaf nodes evaluate the local
// expansion (L2P) and the direct particle-particle (P2P) interactions.
//
// Parameters:
//   target_vector            - output accumulated into at leaf buckets
//   connected_buckets_parent - source buckets strongly connected to the parent
//   g_parent, box_parent     - parent's local expansion and bounding box
//   ci                       - current target (row) bucket iterator
//   source_vector            - source coefficients consumed by P2P
//   num_tasks                - remaining OpenMP task budget; <= 0 -> serial
template <typename VectorTypeTarget, typename VectorTypeSource>
void calculate_dive_M2L_and_L2L(
VectorTypeTarget &target_vector,
const col_child_iterator_vector_type &connected_buckets_parent,
const l_expansion_type &g_parent, const box_type &box_parent,
const row_child_iterator &ci, const VectorTypeSource &source_vector,
const int num_tasks) const {
const box_type &target_box = m_row_query->get_bounds(ci);
LOG(3, "calculate_dive_M2L_and_L2L with bucket " << target_box);
size_t target_index = m_row_query->get_bucket_index(*ci);
l_expansion_type &g = m_g[target_index];
typedef detail::VectorTraits<typename l_expansion_type::value_type>
vector_traits;
// Reset this bucket's local expansion before accumulating into it.
std::fill(std::begin(g), std::end(g), vector_traits::Zero());
typename connectivity_type::reference connected_buckets =
m_connectivity[target_index];
connected_buckets.clear();
detail::theta_condition<dimension> theta(target_box.bmin, target_box.bmax);
if (connected_buckets_parent.empty()) {
// Root of the recursion: classify every top-level source bucket.
for (col_child_iterator cj = m_col_query->get_children(); cj != false;
++cj) {
const box_type &source_box = m_col_query->get_bounds(cj);
if (theta.check(source_box.bmin, source_box.bmax)) {
// Too close for a multipole approximation: defer to P2P later.
connected_buckets.push_back(cj);
} else {
// Well separated: fold the source multipole into g via M2L.
size_t source_index = m_col_query->get_bucket_index(*cj);
m_expansions.M2L(g, target_box, source_box, m_W[source_index]);
}
}
} else {
// expansion from parent
m_expansions.L2L(g, target_box, box_parent, g_parent);
// expansions from weakly connected buckets on this level
// and store strongly connected buckets to connectivity list
for (const col_child_iterator &source : connected_buckets_parent) {
if (m_col_query->is_leaf_node(*source)) {
connected_buckets.push_back(source);
} else {
for (col_child_iterator cj = m_col_query->get_children(source);
cj != false; ++cj) {
const box_type &source_box = m_col_query->get_bounds(cj);
if (theta.check(source_box.bmin, source_box.bmax)) {
connected_buckets.push_back(cj);
} else {
size_t source_index = m_col_query->get_bucket_index(*cj);
m_expansions.M2L(g, target_box, source_box, m_W[source_index]);
}
}
}
}
}
if (!m_row_query->is_leaf_node(*ci)) { // internal node: recurse into children
if (num_tasks > 0) {
// Spawn one task per child; the budget shrinks by the fan-out so the
// task count stays bounded.
const int nchildren = m_row_query->num_children(ci);
for (row_child_iterator cj = m_row_query->get_children(ci); cj != false;
++cj) {
/*
#pragma omp task default(none) firstprivate(cj) shared( \
target_vector, connected_buckets, g, target_box, source_vector, m_W, m_g, \
m_col_query, m_row_query, m_expansions, m_kernel, m_connectivity)
*/
#pragma omp task default(shared) firstprivate(cj) \
shared(target_vector, connected_buckets, g, target_box, source_vector)
calculate_dive_M2L_and_L2L(target_vector, connected_buckets, g,
target_box, cj, source_vector,
num_tasks - nchildren);
}
#pragma omp taskwait
} else {
for (row_child_iterator cj = m_row_query->get_children(ci); cj != false;
++cj) {
calculate_dive_M2L_and_L2L(target_vector, connected_buckets, g,
target_box, cj, source_vector, num_tasks);
}
}
} else if (target_vector.size() > 0) {
// Leaf bucket: evaluate the accumulated local expansion on the targets...
detail::calculate_L2P(target_vector, g, target_box,
m_row_query->get_bucket_particles(*ci),
m_row_query->get_particles_begin(), m_expansions);
// ...then handle the strongly connected sources by direct summation.
for (col_child_iterator &cj : connected_buckets) {
if (m_col_query->is_leaf_node(*cj)) {
LOG(3, "calculate_P2P: target = " << target_box << " source = "
<< m_col_query->get_bounds(cj));
detail::calculate_P2P(target_vector, source_vector,
m_row_query->get_bucket_particles(*ci),
m_col_query->get_bucket_particles(*cj),
m_row_query->get_particles_begin(),
m_col_query->get_particles_begin(), m_kernel);
} else {
// Connected source is an internal node: P2P against every leaf of
// its subtree.
for (auto j = m_col_query->get_subtree(cj); j != false; ++j) {
if (m_col_query->is_leaf_node(*j)) {
detail::calculate_P2P(target_vector, source_vector,
m_row_query->get_bucket_particles(*ci),
m_col_query->get_bucket_particles(*j),
m_row_query->get_particles_begin(),
m_col_query->get_particles_begin(),
m_kernel);
}
}
}
}
}
}
};
#ifdef HAVE_EIGEN
/// Construct a black-box expansion for `function`, deducing the kernel
/// block dimensions at compile time from the position-kernel helper.
template <unsigned int D, unsigned int N, typename Function,
          typename KernelHelper = detail::position_kernel_helper<D, Function>,
          typename Block = typename KernelHelper::Block>
detail::BlackBoxExpansions<D, N, Function, Block::RowsAtCompileTime,
                           Block::ColsAtCompileTime>
make_black_box_expansion(const Function &function) {
  typedef detail::BlackBoxExpansions<D, N, Function, Block::RowsAtCompileTime,
                                     Block::ColsAtCompileTime>
      expansions_type;
  return expansions_type(function);
}
#else
/// Fallback (no Eigen): construct a scalar (1x1 block) black-box expansion
/// for `function`.
template <unsigned int D, unsigned int N, typename Function>
detail::BlackBoxExpansions<D, N, Function, 1, 1>
make_black_box_expansion(const Function &function) {
  typedef detail::BlackBoxExpansions<D, N, Function, 1, 1> expansions_type;
  return expansions_type(function);
}
#endif
/// Convenience factory: deduce the template arguments of
/// FastMultipoleMethod from the supplied particle sets, expansions and
/// kernel, and return a ready-to-use instance.
template <typename Expansions, typename Kernel, typename RowParticles,
          typename ColParticles>
FastMultipoleMethod<Expansions, Kernel, RowParticles, ColParticles>
make_fmm(const RowParticles &row_particles, const ColParticles &col_particles,
         const Expansions &expansions, const Kernel &kernel) {
  typedef FastMultipoleMethod<Expansions, Kernel, RowParticles, ColParticles>
      fmm_type;
  return fmm_type(row_particles, col_particles, expansions, kernel);
}
/*
template <typename Expansions, typename Kernel, typename ColParticles,
typename VectorType>
FastMultipoleMethodWithSource<Expansions,Kernel,ColParticles>
make_fmm_with_source(const ColParticles &col_particles,
const Expansions& expansions,
const Kernel& kernel,
const VectorType& source_vector) {
return FastMultipoleMethodWithSource<Expansions,Kernel,ColParticles>
(col_particles,expansions,kernel,source_vector);
}
*/
} // namespace Aboria
#endif
|
shallow_water_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
//
#ifndef KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED
#define KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED
// System includes
// External includes
// Project includes
#include "includes/model_part.h"
namespace Kratos
{
///@addtogroup ShallowWaterApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
/// Collection of nodal post-processing utilities for the shallow water
/// application. All operations act in place on the given ModelPart; the
/// method bodies (except DeactivateDryEntities) live in the .cpp file.
class KRATOS_API(SHALLOW_WATER_APPLICATION) ShallowWaterUtilities
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ShallowWaterUtilities
KRATOS_CLASS_POINTER_DEFINITION(ShallowWaterUtilities);
///@}
///@name Life Cycle
///@{
/// Default constructor.
/// Destructor.
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Compute the free surface elevation on the nodes of rModelPart
/// (presumably water height plus bathymetry — confirm in the .cpp).
void ComputeFreeSurfaceElevation(ModelPart& rModelPart);
/// Inverse of the above: recover the water height from the free surface.
void ComputeHeightFromFreeSurface(ModelPart& rModelPart);
/// Compute the velocity field (presumably momentum / height — confirm).
void ComputeVelocity(ModelPart& rModelPart);
/// Compute the momentum field (presumably velocity * height — confirm).
void ComputeMomentum(ModelPart& rModelPart);
/// Copy/negate a scalar variable from rOriginVariable into
/// rDestinationVariable over rModelPart (exact semantics in the .cpp).
void FlipScalarVariable(Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable, ModelPart& rModelPart);
/// Flag the solid boundary entities using the given sea water level.
void IdentifySolidBoundary(ModelPart& rModelPart, double SeaWaterLevel, Flags SolidBoundaryFlag);
/// Flag the wet part of the domain; Thickness is the wet/dry threshold.
void IdentifyWetDomain(ModelPart& rModelPart, Flags WetFlag, double Thickness = 0.0);
/// Set each entity's ACTIVE flag to its WetFlag status, so dry entities
/// are skipped by the solver. Runs in parallel over the container.
template<class TContainerType>
void DeactivateDryEntities(TContainerType& rContainer, Flags WetFlag)
{
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(rContainer.size()); ++i)
{
auto it = rContainer.begin() + i;
it->Set(ACTIVE, it->Is(WetFlag));
}
}
/// Compute a water height suitable for visualization of wet/dry fronts.
void ComputeVisualizationWaterHeight(ModelPart& rModelPart, Flags WetFlag, double SeaWaterLevel = 0.0);
/// Compute a water surface field suitable for visualization.
void ComputeVisualizationWaterSurface(ModelPart& rModelPart);
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
///@}
}; // Class ShallowWaterUtilities
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED defined
|
symmetry.c | /* symmetry.c */
/* Copyright (C) 2008 Atsushi Togo */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cell.h"
#include "debug.h"
#include "lattice.h"
#include "mathfunc.h"
#include "pointgroup.h"
#include "primitive.h"
#include "symmetry.h"
#include "debug.h"
#define NUM_ATOMS_CRITERION_FOR_OPENMP 1000
#define REDUCE_RATE 0.95
#define PI 3.14159265358979323846
/* Tolerance of angle between lattice vectors in degrees */
/* Negative value invokes converter from symprec. */
static double angle_tolerance = -1.0;
static int relative_axes[][3] = {
{ 1, 0, 0},
{ 0, 1, 0},
{ 0, 0, 1},
{-1, 0, 0},
{ 0,-1, 0}, /* 5 */
{ 0, 0,-1},
{ 0, 1, 1},
{ 1, 0, 1},
{ 1, 1, 0},
{ 0,-1,-1}, /* 10 */
{-1, 0,-1},
{-1,-1, 0},
{ 0, 1,-1},
{-1, 0, 1},
{ 1,-1, 0}, /* 15 */
{ 0,-1, 1},
{ 1, 0,-1},
{-1, 1, 0},
{ 1, 1, 1},
{-1,-1,-1}, /* 20 */
{-1, 1, 1},
{ 1,-1, 1},
{ 1, 1,-1},
{ 1,-1,-1},
{-1, 1,-1}, /* 25 */
{-1,-1, 1},
};
static int identity[3][3] = {{1, 0, 0},
{0, 1, 0},
{0, 0, 1}};
static int get_index_with_least_atoms(const Cell *cell);
static VecDBL * get_translation(SPGCONST int rot[3][3],
SPGCONST Cell *cell,
const double symprec,
const int is_identity);
static int get_operation(int rot[][3][3],
double trans[][3],
SPGCONST Cell * cell,
const double symprec);
static Symmetry * reduce_operation(SPGCONST Cell * cell,
SPGCONST Symmetry * symmetry,
const double symprec);
static void search_translation_part(int lat_point_atoms[],
SPGCONST Cell * cell,
SPGCONST int rot[3][3],
const int min_atom_index,
const double origin[3],
const double symprec,
const int is_identity);
static int is_overlap_all_atoms(const double test_trans[3],
SPGCONST int rot[3][3],
SPGCONST Cell * cell,
const double symprec,
const int is_identity);
static PointSymmetry
transform_pointsymmetry(SPGCONST PointSymmetry * point_sym_prim,
SPGCONST double new_lattice[3][3],
SPGCONST double original_lattice[3][3]);
static int get_space_group_operation(int rot[][3][3],
double trans[][3],
SPGCONST PointSymmetry *lattice_sym,
SPGCONST Cell *primitive,
const double symprec);
static int recover_operations_supercell(int rot[][3][3],
double trans[][3],
const int num_sym,
const VecDBL * pure_trans,
SPGCONST Cell *cell,
SPGCONST Cell *primitive);
static void set_axes(int axes[3][3],
const int a1, const int a2, const int a3);
static PointSymmetry get_lattice_symmetry(SPGCONST Cell *cell,
const double symprec);
static int is_identity_metric(SPGCONST double metric_rotated[3][3],
SPGCONST double metric_orig[3][3],
const double symprec);
static double get_angle(SPGCONST double metric[3][3],
const int i,
const int j);
/* Allocate a Symmetry container able to hold `size` operations.          */
/* Fix: the malloc of the struct itself was unchecked, and rot/trans were */
/* left uninitialized when size <= 0. Exits on allocation failure, which  */
/* matches the error handling of the member-array allocations below.      */
Symmetry * sym_alloc_symmetry(const int size)
{
  Symmetry *symmetry;

  symmetry = (Symmetry*) malloc(sizeof(Symmetry));
  if (symmetry == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    exit(1);
  }
  symmetry->size = size;
  /* Keep the members well-defined even when no operations are stored. */
  symmetry->rot = NULL;
  symmetry->trans = NULL;
  if (size > 0) {
    if ((symmetry->rot =
         (int (*)[3][3]) malloc(sizeof(int[3][3]) * size)) == NULL) {
      warning_print("spglib: Memory could not be allocated ");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      exit(1);
    }
    if ((symmetry->trans =
         (double (*)[3]) malloc(sizeof(double[3]) * size)) == NULL) {
      warning_print("spglib: Memory could not be allocated ");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      exit(1);
    }
  }
  return symmetry;
}
/* Release a Symmetry allocated by sym_alloc_symmetry.                     */
/* Fixes: safe on NULL input; the trailing `symmetry = NULL;` assignment   */
/* to the local parameter had no effect on the caller and was removed.     */
void sym_free_symmetry(Symmetry *symmetry)
{
  if (symmetry == NULL) {
    return;
  }
  if (symmetry->size > 0) {
    free(symmetry->rot);
    symmetry->rot = NULL;
    free(symmetry->trans);
    symmetry->trans = NULL;
  }
  free(symmetry);
}
/* Find all space-group operations (rotation + translation pairs) of      */
/* `cell` within tolerance `symprec`. The returned Symmetry is owned by   */
/* the caller and must be released with sym_free_symmetry().              */
Symmetry * sym_get_operation(SPGCONST Cell *cell,
const double symprec) {
int i, j, num_sym;
MatINT *rot;
VecDBL *trans;
Symmetry *symmetry;
/* Upper bound: at most 48 point-group operations per lattice point. */
rot = mat_alloc_MatINT(cell->size * 48);
trans = mat_alloc_VecDBL(cell->size * 48);
num_sym = get_operation(rot->mat, trans->vec, cell, symprec);
symmetry = sym_alloc_symmetry(num_sym);
for (i = 0; i < num_sym; i++) {
mat_copy_matrix_i3(symmetry->rot[i], rot->mat[i]);
for (j = 0; j < 3; j++) {
/* Bring each translation component close to zero by subtracting the */
/* nearest integer (lattice periodicity).                             */
symmetry->trans[i][j] = trans->vec[i][j] - mat_Nint(trans->vec[i][j]);
}
}
mat_free_MatINT(rot);
mat_free_VecDBL(trans);
return symmetry;
}
/* Number of operations may be reduced with smaller symprec. */
Symmetry * sym_reduce_operation(SPGCONST Cell * cell,
                                SPGCONST Symmetry * symmetry,
                                const double symprec)
{
  /* Thin public wrapper: all of the work happens in the file-local */
  /* reduce_operation() helper.                                     */
  Symmetry *sym_reduced;

  sym_reduced = reduce_operation(cell, symmetry, symprec);
  return sym_reduced;
}
/* Return the number of pure translations of `cell`, i.e. the number of   */
/* primitive-cell copies it contains.                                     */
int sym_get_multiplicity(SPGCONST Cell *cell,
                         const double symprec)
{
  int num_pure_trans;
  VecDBL *pure_trans;

  pure_trans = get_translation(identity, cell, symprec, 1);
  num_pure_trans = pure_trans->size;
  mat_free_VecDBL(pure_trans);

  return num_pure_trans;
}
/* Find the pure translations (identity rotation) of `cell`.               */
/* Fixes: a stray empty statement before the warning was removed, and the  */
/* divisibility test now guards against multi == 0 — the old expression    */
/* `(cell->size / multi) * multi` divided before checking, which is        */
/* undefined behavior when the translation search returns nothing.         */
VecDBL * sym_get_pure_translation(SPGCONST Cell *cell,
                                  const double symprec)
{
  int multi;
  VecDBL * pure_trans;

  pure_trans = get_translation(identity, cell, symprec, 1);
  multi = pure_trans->size;
  /* The number of atoms must be an integer multiple of the number of */
  /* pure translations; anything else means the search went wrong.    */
  if (multi > 0 && cell->size % multi == 0) {
    debug_print("sym_get_pure_translation: pure_trans->size = %d\n", multi);
  } else {
    warning_print("spglib: Finding pure translation failed (line %d, %s).\n", __LINE__, __FILE__);
    warning_print(" cell->size %d, multi %d\n", cell->size, multi);
  }
  return pure_trans;
}
/* Re-check a set of pure translations against `cell` at tolerance        */
/* `symprec` and return only the ones that still hold. Caller frees the   */
/* returned VecDBL.                                                       */
VecDBL * sym_reduce_pure_translation(SPGCONST Cell * cell,
                                     const VecDBL * pure_trans,
                                     const double symprec)
{
  int i, num_trans;
  Symmetry *sym_in, *sym_out;
  VecDBL *reduced;

  /* Wrap each pure translation as a full operation with identity rotation */
  /* so the generic reduce_operation() machinery can filter them.          */
  num_trans = pure_trans->size;
  sym_in = sym_alloc_symmetry(num_trans);
  for (i = 0; i < num_trans; i++) {
    mat_copy_matrix_i3(sym_in->rot[i], identity);
    mat_copy_vector_d3(sym_in->trans[i], pure_trans->vec[i]);
  }

  sym_out = reduce_operation(cell, sym_in, symprec);
  sym_free_symmetry(sym_in);

  /* Unwrap the surviving operations back into a plain translation list. */
  num_trans = sym_out->size;
  reduced = mat_alloc_VecDBL(num_trans);
  for (i = 0; i < num_trans; i++) {
    mat_copy_vector_d3(reduced->vec[i], sym_out->trans[i]);
  }
  sym_free_symmetry(sym_out);

  return reduced;
}
/* Set the tolerance (in degrees) used by is_identity_metric() when        */
/* comparing lattice-vector angles. A negative value (the default) makes   */
/* the angle check fall back to a criterion derived from symprec.          */
void sym_set_angle_tolerance(double tolerance)
{
angle_tolerance = tolerance;
}
/* Return the current angle tolerance in degrees (negative means the      */
/* symprec-derived criterion is in use).                                  */
double sym_get_angle_tolerance(void)
{
return angle_tolerance;
}
/* 1) A primitive cell of the input cell is searched. */
/* 2) Pointgroup operations of the primitive cell are obtained. */
/* These are constrained by the input cell lattice pointgroup, */
/* i.e., even if the lattice of the primitive cell has higher */
/* symmetry than that of the input cell, it is not considered. */
/* 3) Spacegroup operations are searched for the primitive cell */
/* using the constrained point group operations. */
/* 4) The spacegroup operations for the primitive cell are */
/* transformed to those of original input cells, if the input cell */
/* was not a primitive cell. */
/* Core space-group search; fills rot[]/trans[] and returns the number of */
/* operations found (0 on failure). Follows the four steps described in   */
/* the comment block above this function.                                 */
static int get_operation(int rot[][3][3],
double trans[][3],
SPGCONST Cell *cell,
const double symprec)
{
int num_sym;
PointSymmetry lattice_sym;
Primitive primitive;
debug_print("get_operation:\n");
num_sym = 0;
/* Step 2 (constraint): point-group operations of the input lattice. */
lattice_sym = get_lattice_symmetry(cell, symprec);
if (lattice_sym.size == 0) {
debug_print("get_lattice_symmetry failed.\n");
goto end;
}
/* Step 1: reduce the input cell to a primitive cell. */
primitive = prm_get_primitive_and_pure_translations(cell, symprec);
if (primitive.cell->size == 0) {goto deallocate_and_end;}
/* Express the lattice point group in the primitive-cell basis. */
lattice_sym = transform_pointsymmetry(&lattice_sym,
primitive.cell->lattice,
cell->lattice);
if (lattice_sym.size == 0) {goto deallocate_and_end;}
/* Step 3: search space-group operations of the primitive cell. */
num_sym = get_space_group_operation(rot, trans, &lattice_sym,
primitive.cell, symprec);
/* Step 4: map back to the original (possibly non-primitive) cell. */
num_sym = recover_operations_supercell(rot,
trans,
num_sym,
primitive.pure_trans,
cell,
primitive.cell);
deallocate_and_end:
cel_free_cell(primitive.cell);
mat_free_VecDBL(primitive.pure_trans);
end:
return num_sym;
}
/* Filter `symmetry`: keep only the operations whose rotation is also a   */
/* point-group operation of the lattice of `cell` AND which map every     */
/* atom of `cell` onto an equivalent atom within `symprec`. Returns a     */
/* newly allocated Symmetry owned by the caller.                          */
static Symmetry * reduce_operation(SPGCONST Cell * cell,
SPGCONST Symmetry * symmetry,
const double symprec)
{
int i, j, num_sym;
Symmetry * sym_reduced;
PointSymmetry point_symmetry;
MatINT *rot;
VecDBL *trans;
debug_print("reduce_operation:\n");
point_symmetry = get_lattice_symmetry(cell, symprec);
/* Scratch buffers sized for the worst case (nothing is dropped). */
rot = mat_alloc_MatINT(symmetry->size);
trans = mat_alloc_VecDBL(symmetry->size);
num_sym = 0;
for (i = 0; i < point_symmetry.size; i++) {
for (j = 0; j < symmetry->size; j++) {
/* Rotation must be among the lattice point-group operations. */
if (mat_check_identity_matrix_i3(point_symmetry.rot[i],
symmetry->rot[j])) {
/* ...and the full operation must still map the cell onto itself. */
if (is_overlap_all_atoms(symmetry->trans[j],
symmetry->rot[j],
cell,
symprec,
0)) {
mat_copy_matrix_i3(rot->mat[num_sym], symmetry->rot[j]);
mat_copy_vector_d3(trans->vec[num_sym], symmetry->trans[j]);
num_sym++;
}
}
}
}
/* Copy the survivors into a right-sized result. */
sym_reduced = sym_alloc_symmetry(num_sym);
for (i = 0; i < num_sym; i++) {
mat_copy_matrix_i3(sym_reduced->rot[i], rot->mat[i]);
mat_copy_vector_d3(sym_reduced->trans[i], trans->vec[i]);
}
mat_free_MatINT(rot);
mat_free_VecDBL(trans);
debug_print(" num_sym %d -> %d\n", symmetry->size, num_sym);
return sym_reduced;
}
/* Look for the translations which satisfy the input symmetry operation. */
/* This function is heaviest in this code. */
/* Find all translations t such that (rot | t) maps `cell` onto itself    */
/* within `symprec`. Candidate translations are generated by mapping the  */
/* reference atom (the one whose type has the fewest atoms) onto each     */
/* atom of the same type. `is_identity` enables a fast path when rot is   */
/* the identity. Caller frees the returned VecDBL.                        */
static VecDBL * get_translation(SPGCONST int rot[3][3],
SPGCONST Cell *cell,
const double symprec,
const int is_identity)
{
int i, j, min_atom_index, num_trans = 0;
int *is_found;
double origin[3];
VecDBL *trans;
#ifdef _OPENMP
int num_min_type_atoms;
int *min_type_atoms;
double vec[3];
#endif
/* is_found[i] == 1 marks atom i as yielding a valid translation. */
is_found = (int*) malloc(sizeof(int)*cell->size);
for (i = 0; i < cell->size; i++) {
is_found[i] = 0;
}
/* Look for the atom index with least number of atoms within same type */
min_atom_index = get_index_with_least_atoms(cell);
/* Set min_atom_index as the origin to measure the distance between atoms. */
mat_multiply_matrix_vector_id3(origin, rot, cell->position[min_atom_index]);
#ifdef _OPENMP
/* Parallelize only for large cells; the serial helper is cheaper below */
/* the NUM_ATOMS_CRITERION_FOR_OPENMP threshold.                        */
if (cell->size < NUM_ATOMS_CRITERION_FOR_OPENMP) {
search_translation_part(is_found,
cell,
rot,
min_atom_index,
origin,
symprec,
is_identity);
} else {
/* Collect indices of atoms with the type where the minimum number */
/* of atoms belong. */
min_type_atoms = (int*) malloc(sizeof(int)*cell->size);
num_min_type_atoms = 0;
for (i = 0; i < cell->size; i++) {
if (cell->types[i] == cell->types[min_atom_index]) {
min_type_atoms[num_min_type_atoms] = i;
num_min_type_atoms++;
}
}
#pragma omp parallel for private(j, vec)
for (i = 0; i < num_min_type_atoms; i++) {
for (j = 0; j < 3; j++) {
vec[j] = cell->position[min_type_atoms[i]][j] - origin[j];
}
if (is_overlap_all_atoms(vec,
rot,
cell,
symprec,
is_identity)) {
is_found[min_type_atoms[i]] = 1;
}
}
free(min_type_atoms);
}
#else
search_translation_part(is_found,
cell,
rot,
min_atom_index,
origin,
symprec,
is_identity);
#endif
/* Compact the flags into the output list of translations. */
for (i = 0; i < cell->size; i++) {
num_trans += is_found[i];
}
trans = mat_alloc_VecDBL(num_trans);
num_trans = 0;
for (i = 0; i < cell->size; i++) {
if (is_found[i]) {
for (j = 0; j < 3; j++) {
trans->vec[num_trans][j] = cell->position[i][j] - origin[j];
}
num_trans++;
}
}
free(is_found);
is_found = NULL;
return trans;
}
/* Serial candidate-translation scan: try every atom of the same type as  */
/* the reference atom as the image of the reference under the candidate   */
/* rotation, and set lat_point_atoms[i] = 1 for the atoms whose induced   */
/* translation maps the whole cell onto itself.                           */
static void search_translation_part(int lat_point_atoms[],
                                    SPGCONST Cell * cell,
                                    SPGCONST int rot[3][3],
                                    const int min_atom_index,
                                    const double origin[3],
                                    const double symprec,
                                    const int is_identity)
{
  int i, k;
  double trial_trans[3];

  for (i = 0; i < cell->size; i++) {
    if (cell->types[i] == cell->types[min_atom_index]) {
      for (k = 0; k < 3; k++) {
        trial_trans[k] = cell->position[i][k] - origin[k];
      }
      if (is_overlap_all_atoms(trial_trans,
                               rot,
                               cell,
                               symprec,
                               is_identity)) {
        lat_point_atoms[i] = 1;
      }
    }
  }
}
/* Return 1 when the operation (rot | trans) maps every atom of `cell`    */
/* onto an atom of the same type within `symprec` (distance measured in   */
/* Cartesian coordinates with the minimum-image convention), 0 otherwise. */
static int is_overlap_all_atoms(const double trans[3],
                                SPGCONST int rot[3][3],
                                SPGCONST Cell * cell,
                                const double symprec,
                                const int is_identity)
{
  int i, j, k, found_partner;
  double tolerance2;
  double rotated[3], delta[3];

  tolerance2 = symprec * symprec;
  for (i = 0; i < cell->size; i++) {
    /* Identity rotation is special-cased to skip the matrix product. */
    if (is_identity) {
      for (k = 0; k < 3; k++) {
        rotated[k] = cell->position[i][k] + trans[k];
      }
    } else {
      mat_multiply_matrix_vector_id3(rotated, rot, cell->position[i]);
      for (k = 0; k < 3; k++) {
        rotated[k] += trans[k];
      }
    }

    found_partner = 0;
    for (j = 0; j < cell->size; j++) {
      if (cell->types[i] != cell->types[j]) {
        continue;
      }
      /* here cel_is_overlap could be used, but it is rewritten inline */
      /* for speed: minimum-image fractional difference, then mapped   */
      /* to Cartesian before the squared-distance test.                */
      for (k = 0; k < 3; k++) {
        delta[k] = rotated[k] - cell->position[j][k];
        delta[k] -= mat_Nint(delta[k]);
      }
      mat_multiply_matrix_vector_d3(delta, cell->lattice, delta);
      if (delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2] <
          tolerance2) {
        found_partner = 1;
        break;
      }
    }
    if (!found_partner) {
      return 0; /* some atom has no image: not a symmetry operation */
    }
  }
  return 1; /* every atom maps onto an equivalent atom */
}
/* Return the index of an atom belonging to the type with the fewest      */
/* atoms in `cell` — the cheapest reference atom for the translation      */
/* search in get_translation().                                           */
static int get_index_with_least_atoms(const Cell *cell)
{
  int i, j, smallest, smallest_index;
  int *type_count;

  type_count = (int *) malloc(sizeof(int) * cell->size);
  for (i = 0; i < cell->size; i++) {
    type_count[i] = 0;
  }

  /* Accumulate the population of each type at the index of its first */
  /* occurrence; every other entry stays zero.                        */
  for (i = 0; i < cell->size; i++) {
    for (j = 0; j < cell->size; j++) {
      if (cell->types[i] == cell->types[j]) {
        type_count[j]++;
        break;
      }
    }
  }

  /* Pick the first-occurrence index with the smallest non-zero count. */
  smallest = type_count[0];
  smallest_index = 0;
  for (i = 0; i < cell->size; i++) {
    if (type_count[i] > 0 && type_count[i] < smallest) {
      smallest = type_count[i];
      smallest_index = i;
    }
  }

  free(type_count);
  type_count = NULL;

  return smallest_index;
}
/* For every rotation of the lattice point group, collect all compatible  */
/* translations and append the resulting (rot | trans) pairs to rot[] and */
/* trans[]. Returns the total number of operations written.               */
static int get_space_group_operation(int rot[][3][3],
double trans[][3],
SPGCONST PointSymmetry *lattice_sym,
SPGCONST Cell *cell,
const double symprec)
{
int i, j, k, num_sym;
VecDBL **tmp_trans;
debug_print("get_space_group_operation:\n");
num_sym = 0;
/* NOTE(review): malloc result is used unchecked — assumes allocation */
/* succeeds, consistent with the rest of this file.                   */
tmp_trans = (VecDBL**) malloc(sizeof(VecDBL*) * lattice_sym->size);
for (i = 0; i < lattice_sym->size; i++) {
/* get translation corresponding to a rotation */
tmp_trans[i] = get_translation(lattice_sym->rot[i], cell, symprec, 0);
}
/* Flatten: one output operation per (rotation, translation) pair. */
for (i = 0; i < lattice_sym->size; i++) {
for (j = 0; j < tmp_trans[i]->size; j++) {
for (k = 0; k < 3; k++) {
trans[num_sym + j][k] = tmp_trans[i]->vec[j][k];
}
mat_copy_matrix_i3(rot[num_sym + j], lattice_sym->rot[i]);
}
num_sym += tmp_trans[i]->size;
mat_free_VecDBL(tmp_trans[i]);
}
free(tmp_trans);
tmp_trans = NULL;
return num_sym;
}
/* Expand the `num_sym` operations found for the primitive cell into the  */
/* operations of the original (super)cell: transform rotations and        */
/* translations into the supercell basis, then replicate each operation   */
/* once per pure translation. Returns num_sym * multi.                    */
static int recover_operations_supercell(int rot[][3][3],
double trans[][3],
const int num_sym,
const VecDBL * pure_trans,
SPGCONST Cell *cell,
SPGCONST Cell *primitive)
{
int i, j, k, multi;
double inv_prim_lat[3][3], drot[3][3], trans_mat[3][3], trans_mat_inv[3][3];
MatINT *rot_prim;
VecDBL *trans_prim;
rot_prim = mat_alloc_MatINT(num_sym);
trans_prim = mat_alloc_VecDBL(num_sym);
multi = pure_trans->size;
debug_print("recover_operations_supercell:\n");
/* trans_mat expresses the supercell lattice in the primitive basis. */
mat_inverse_matrix_d3(inv_prim_lat, primitive->lattice, 0);
mat_multiply_matrix_d3(trans_mat, inv_prim_lat, cell->lattice);
mat_inverse_matrix_d3(trans_mat_inv, trans_mat, 0);
/* Transform each operation into the supercell basis, in place. */
for(i = 0; i < num_sym; i++) {
/* Translations */
mat_multiply_matrix_vector_d3(trans[i], trans_mat_inv, trans[i]);
/* Rotations */
mat_cast_matrix_3i_to_3d(drot, rot[i]);
mat_get_similar_matrix_d3(drot, drot, trans_mat, 0);
mat_cast_matrix_3d_to_3i(rot[i], drot);
}
/* Snapshot the transformed operations first; the replication loop below */
/* overwrites rot[]/trans[] entries (index i*multi+j overlaps index i).  */
for(i = 0; i < num_sym; i++) {
mat_copy_matrix_i3(rot_prim->mat[i], rot[i]);
for(j = 0; j < 3; j++)
trans_prim->vec[i][j] = trans[i][j];
}
/* Rotations and translations are copied with the set of */
/* pure translations. */
for(i = 0; i < num_sym; i++) {
for(j = 0; j < multi; j++) {
mat_copy_matrix_i3(rot[ i * multi + j ], rot_prim->mat[i]);
for (k = 0; k < 3; k++) {
/* mat_Dmod1 wraps the combined translation back into the cell. */
trans[i * multi + j][k] =
mat_Dmod1(trans_prim->vec[i][k] + pure_trans->vec[j][k]);
}
}
}
mat_free_MatINT(rot_prim);
mat_free_VecDBL(trans_prim);
/* return number of symmetry operation of supercell */
return num_sym * multi;
}
/* Collect the point-group operations of the lattice of `cell`.            */
/* Candidate integer matrices are built from all triplets of               */
/* `relative_axes` entries with determinant +/-1; a candidate is accepted  */
/* when it leaves the metric tensor of the reduced lattice unchanged.      */
/* Returns a PointSymmetry with size == 0 on failure.                      */
/* Fix: the capacity check ran only *after* mat_copy_matrix_i3 had already */
/* written lattice_sym.rot[num_sym]; with 48 operations already stored     */
/* that wrote index 48 — out of bounds. The bound is now checked before    */
/* storing.                                                                */
static PointSymmetry get_lattice_symmetry(SPGCONST Cell *cell,
                                          const double symprec)
{
  int i, j, k, num_sym;
  int axes[3][3];
  double lattice[3][3], min_lattice[3][3];
  double metric[3][3], metric_orig[3][3];
  PointSymmetry lattice_sym;

  debug_print("get_lattice_symmetry:\n");

  /* Work with the reduced (smallest-vector) lattice for stable metrics. */
  if (! lat_smallest_lattice_vector(min_lattice,
                                    cell->lattice,
                                    symprec)) {
    goto err;
  }
  mat_get_metric(metric_orig, min_lattice);

  num_sym = 0;
  for (i = 0; i < 26; i++) {
    for (j = 0; j < 26; j++) {
      for (k = 0; k < 26; k++) {
        set_axes(axes, i, j, k);
        /* Only unimodular candidates (det = +/-1) can be symmetries. */
        if (! ((mat_get_determinant_i3(axes) == 1) ||
               (mat_get_determinant_i3(axes) == -1))) {
          continue;
        }
        mat_multiply_matrix_di3(lattice, min_lattice, axes);
        mat_get_metric(metric, lattice);
        if (is_identity_metric(metric, metric_orig, symprec)) {
          /* lattice_sym.rot holds at most 48 operations; bail out      */
          /* before writing a 49th entry.                               */
          if (num_sym >= 48) {
            warning_print("spglib: Too many lattice symmetries was found.\n");
            warning_print(" Tolerance may be too large ");
            warning_print("(line %d, %s).\n", __LINE__, __FILE__);
            goto err;
          }
          mat_copy_matrix_i3(lattice_sym.rot[num_sym], axes);
          num_sym++;
        }
      }
    }
  }

  lattice_sym.size = num_sym;
  /* Map the operations from the reduced lattice back to the input one. */
  return transform_pointsymmetry(&lattice_sym,
                                 cell->lattice,
                                 min_lattice);

err:
  lattice_sym.size = 0;
  return lattice_sym;
}
/* Return 1 when the rotated metric tensor matches the original within    */
/* tolerance, i.e. when the trial rotation preserves the lattice. Vector  */
/* lengths are compared against symprec directly; pair angles are         */
/* compared against angle_tolerance when it is positive, otherwise via a  */
/* length-scaled sine-of-angle-difference criterion derived from symprec. */
static int is_identity_metric(SPGCONST double metric_rotated[3][3],
SPGCONST double metric_orig[3][3],
const double symprec)
{
int i, j, k;
int elem_sets[3][2] = {{0, 1},
{0, 2},
{1, 2}};
double cos1, cos2, x, length_ave2, sin_dtheta2;
double length_orig[3], length_rot[3];
/* Diagonal entries are squared basis-vector lengths. */
for (i = 0; i < 3; i++) {
length_orig[i] = sqrt(metric_orig[i][i]);
length_rot[i] = sqrt(metric_rotated[i][i]);
if (mat_Dabs(length_orig[i] - length_rot[i]) > symprec) {
goto fail;
}
}
/* Compare the three pairwise angles (a,b), (a,c), (b,c). */
for (i = 0; i < 3; i++) {
j = elem_sets[i][0];
k = elem_sets[i][1];
if (angle_tolerance > 0) {
if (mat_Dabs(get_angle(metric_orig, j, k) -
get_angle(metric_rotated, j, k)) > angle_tolerance) {
goto fail;
}
} else {
/* dtheta = arccos(cos(theta1) - arccos(cos(theta2))) */
/* = arccos(c1) - arccos(c2) */
/* = arccos(c1c2 + sqrt((1-c1^2)(1-c2^2))) */
/* sin(dtheta) = sin(arccos(x)) = sqrt(1 - x^2) */
cos1 = metric_orig[j][k] / length_orig[j] / length_orig[k];
cos2 = metric_rotated[j][k] / length_rot[j] / length_rot[k];
x = cos1 * cos2 + sqrt(1 - cos1 * cos1) * sqrt(1 - cos2 * cos2);
sin_dtheta2 = 1 - x * x;
length_ave2 = ((length_orig[j] + length_rot[j]) *
(length_orig[k] + length_rot[k])) / 4;
/* Accept when the arc length swept by the angle change is within */
/* the positional tolerance.                                      */
if (sin_dtheta2 > 1e-12) {
if (sin_dtheta2 * length_ave2 > symprec * symprec) {
goto fail;
}
}
}
}
return 1;
fail:
return 0;
}
static double get_angle(SPGCONST double metric[3][3],
const int i,
const int j)
{
double length_i, length_j;
length_i = sqrt(metric[i][i]);
length_j = sqrt(metric[j][j]);
return acos(metric[i][j] / length_i / length_j) / PI * 180;
}
/* Re-express the point-symmetry operations of original_lattice in the basis
 * of new_lattice. Operations whose transformed rotation matrix is not an
 * integer matrix are dropped (new_lattice may have lower symmetry).
 * On error the returned PointSymmetry has size 0. */
static PointSymmetry
transform_pointsymmetry(SPGCONST PointSymmetry * lat_sym_orig,
                        SPGCONST double new_lattice[3][3],
                        SPGCONST double original_lattice[3][3])
{
  int i, size;
  double trans_mat[3][3], inv_mat[3][3], drot[3][3];
  PointSymmetry lat_sym_new;

  /* trans_mat = original_lattice^-1 * new_lattice: basis change matrix. */
  mat_inverse_matrix_d3(inv_mat, original_lattice, 0);
  mat_multiply_matrix_d3(trans_mat, inv_mat, new_lattice);

  size = 0;
  for (i = 0; i < lat_sym_orig->size; i++) {
    mat_cast_matrix_3i_to_3d(drot, lat_sym_orig->rot[i]);
    mat_get_similar_matrix_d3(drot, drot, trans_mat, 0);
    /* new_lattice may have lower point symmetry than original_lattice.*/
    /* The operations that have non-integer elements are not counted. */
    if (mat_is_int_matrix(drot, mat_Dabs(mat_get_determinant_d3(trans_mat)) / 10)) {
      mat_cast_matrix_3d_to_3i(lat_sym_new.rot[size], drot);
      /* BUGFIX: the original test was `if (! abs(det) == 1)`, which parses
       * as `(!abs(det)) == 1` and therefore fired only when det == 0.
       * A valid point-symmetry rotation must be unimodular: |det| == 1. */
      if (abs(mat_get_determinant_i3(lat_sym_new.rot[size])) != 1) {
        warning_print("spglib: A point symmetry operation is not unimodular.");
        warning_print("(line %d, %s).\n", __LINE__, __FILE__);
        goto err;
      }
      size++;
    }
  }

#ifdef SPGWARNING
  if (! (lat_sym_orig->size == size)) {
    warning_print("spglib: Some of point symmetry operations were dropped.");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
  }
#endif

  lat_sym_new.size = size;
  return lat_sym_new;

 err:
  lat_sym_new.size = 0;
  return lat_sym_new;
}
/* Build a 3x3 transformation from three rows of the global relative_axes
 * table: column c of `axes` is row a_c of relative_axes, transposed. */
static void set_axes(int axes[3][3],
                     const int a1, const int a2, const int a3)
{
  const int selection[3] = {a1, a2, a3};
  int row, col;

  for (col = 0; col < 3; col++) {
    for (row = 0; row < 3; row++) {
      axes[row][col] = relative_axes[selection[col]][row];
    }
  }
}
|
ErosionFilter.h | /*
* ErosionFilter.h
*
* Created on: 13.06.2016
* Author: Darius Malysiak
*/
#ifndef IMAGEPROCESSING_EROSIONFILTER_H_
#define IMAGEPROCESSING_EROSIONFILTER_H_
#include "../BaseObject.h"
#include "../DataStructures/Matrix.h"
#include "../DataStructures/Image.h"
namespace Lazarus {
template<typename T>
class ErosionFilter: public Lazarus::BaseObject {

public:

	/**
	 * Returns a 3x3 kernel filled with 'val'.
	 * NOTE(review): unlike the other kernel factories this returns a pointer
	 * to a function-local static object — it must NOT be freed. It is also
	 * not thread-safe, since every call re-initializes the shared instance.
	 * */
	static const Lazarus::Matrix2<double>* get_EROSION3x3_KERNEL(double val)
	{
		static Lazarus::Matrix2<double> _EROSION_KERNEL;

		_EROSION_KERNEL.initMatrix(3,3);

		_EROSION_KERNEL.setData(0,0,val);
		_EROSION_KERNEL.setData(0,1,val);
		_EROSION_KERNEL.setData(0,2,val);

		_EROSION_KERNEL.setData(1,0,val);
		_EROSION_KERNEL.setData(1,1,val);
		_EROSION_KERNEL.setData(1,2,val);

		_EROSION_KERNEL.setData(2,0,val);
		_EROSION_KERNEL.setData(2,1,val);
		_EROSION_KERNEL.setData(2,2,val);

		return &_EROSION_KERNEL;
	}

	/**
	 * Returns a size x size kernel completely filled with 'val'.
	 * Do not forget to free the returned matrix.
	 * */
	static const Lazarus::Matrix2<double>* get_EROSION_KERNEL_SQUARE(double val, unsigned int size)
	{
		Lazarus::Matrix2<double>* _EROSION_KERNEL = new Lazarus::Matrix2<double>();
		_EROSION_KERNEL->initMatrix(size,size);
		_EROSION_KERNEL->globalSetMatrixVal(val);
		return _EROSION_KERNEL;
	}

	/**
	 * Returns a size x size kernel with a centered diamond of 'val' over a
	 * background of 'val_bg'.
	 * Do not forget to free the returned matrix.
	 * */
	static const Lazarus::Matrix2<double>* get_EROSION_KERNEL_DIAMOND(double val, double val_bg, unsigned int size)
	{
		Lazarus::Matrix2<double>* _EROSION_KERNEL = new Lazarus::Matrix2<double>();
		_EROSION_KERNEL->initMatrix(size,size);
		_EROSION_KERNEL->globalSetMatrixVal(val_bg);

		//grow the diamond row by row from the top; the mirrored write fills
		//the bottom half (the middle row is written twice, harmlessly)
		for(unsigned int i=0;i<size/2 + 1;i++)//include middle row
			for(unsigned int j=size/2 - i; j <= size/2 + i; j++)
			{
				_EROSION_KERNEL->setData(i,j,val);
				_EROSION_KERNEL->setData(size-1 - i,j,val);//bottom
			}

		return _EROSION_KERNEL;
	}

	/**
	 * Do not forget to free the returned matrix.
	 * Line height defines the height of the horizontal line
	 * around the central line, i.e. 0 will result in only the middle
	 * row being filled with 'val'.
	 * */
	static const Lazarus::Matrix2<double>* get_EROSION_KERNEL_HORIZONTAL_LINE(double val, double val_bg, unsigned int size, unsigned int line_height)
	{
		Lazarus::Matrix2<double>* _EROSION_KERNEL = new Lazarus::Matrix2<double>();
		_EROSION_KERNEL->initMatrix(size,size);
		_EROSION_KERNEL->globalSetMatrixVal(val_bg);

		//rows around (and including) the middle row
		for(unsigned int i=size/2 - line_height/2; i <= size/2 + line_height/2; i++)
			for(unsigned int j=0; j < size; j++)
			{
				_EROSION_KERNEL->setData(i,j,val);
			}

		return _EROSION_KERNEL;
	}

	/**
	 * Do not forget to free the returned matrix.
	 * Line height defines the width of the vertical line
	 * around the central line, i.e. 0 will result in only the middle
	 * column being filled with 'val'.
	 * */
	static const Lazarus::Matrix2<double>* get_EROSION_KERNEL_VERTICAL_LINE(double val, double val_bg, unsigned int size,
			unsigned int line_height)
	{
		Lazarus::Matrix2<double>* _EROSION_KERNEL = new Lazarus::Matrix2<double>();
		_EROSION_KERNEL->initMatrix(size,size);
		_EROSION_KERNEL->globalSetMatrixVal(val_bg);

		//columns around (and including) the middle column
		for(unsigned int i=size/2 - line_height/2; i <= size/2 + line_height/2; i++)
			for(unsigned int j=0; j < size; j++)
			{
				_EROSION_KERNEL->setData(j,i,val);
			}

		return _EROSION_KERNEL;
	}

	ErosionFilter()
	{
		mp_filter_mask = NULL;
	}

	virtual ~ErosionFilter(){}

	/**
	 * Sets the structuring element; ownership stays with the caller.
	 * */
	void setErosionKernel(const Lazarus::Matrix2<double>* filter)
	{
		this->mp_filter_mask = filter;
	}

	/**
	 * We assume a kernel with odd dimensions. The erosion will be computed on an extended image with black borders
	 * such that the kernel can be positioned onto the first image pixel.
	 * Grayscale erosion: out = min over the window of (pixel - kernel value),
	 * clamped to [numeric_limits<T>::min(), clamping_val].
	 * NOTE(review): for floating-point T, numeric_limits<T>::min() is the
	 * smallest POSITIVE value, not the most negative — confirm intent.
	 * Returns the filtered image in case of success otherwise NULL.
	 **/
	Lazarus::Image<T>* filterImage( Lazarus::Image<T>* image, double clamping_val=255.0 )
	{
		unsigned int offset_x = (mp_filter_mask->getColumnCount()-1)/2;
		unsigned int offset_y = (mp_filter_mask->getRowCount()-1)/2;
		unsigned int image_width = image->getm_width();
		unsigned int image_heigth = image->getm_height();
		unsigned int channel_count = image->getm_channel_count();
		unsigned int filter_width = mp_filter_mask->getColumnCount();
		unsigned int filter_height = mp_filter_mask->getRowCount();

		if(filter_width % 2 != 1)
		{
			printf("filter width %d is not odd\n",filter_width);
			return NULL;
		}
		if(filter_height % 2 != 1)
		{
			printf("filter height %d is not odd\n",filter_height);
			return NULL;
		}

		Lazarus::Image<T>* output = new Lazarus::Image<T>( image_width, image_heigth, image->getm_data_alignment() );
		Lazarus::Image<T>* temporary = new Lazarus::Image<T>( image_width + 2*offset_x,
				image_heigth + 2*offset_y, image->getm_data_alignment() );

		//fill the output and temp image with black
		Lazarus::FastKTuple<T> color(channel_count);
		for(unsigned int i=0; i< channel_count; i++)
		{
			color.setElement(i,0);
		}
		output->fillImageFast( &color );
		temporary->fillImageFast( &color );

		//copy the input image into the temp buffer;
		for(unsigned int i=0; i<image_width; i++)
		{
			for(unsigned int j=0; j<image_heigth; j++)
			{
				image->getPixelFast( &color,i,j );
				temporary->setPixelFast(&color,offset_x + i,offset_y + j);
			}
		}

		//start the erosion process over every pixel;
		//at most the first 3 channels are eroded, channel 4 (alpha) is copied
		unsigned int c_limit = 0;
		if(channel_count > 3)
			c_limit = 3;
		else
			c_limit = channel_count;

		double dmax = std::numeric_limits<double>::max();
		T min = std::numeric_limits<T>::min();

		#pragma omp parallel for
		for(unsigned int i=offset_x; i<image_width+(offset_x); i++)
		{
			double temp_value = dmax;
			double filter_value = 0;
			Lazarus::FastKTuple<T> new_color(channel_count);
			Lazarus::FastKTuple<T> color_(channel_count);

			for(unsigned int j=offset_y; j<image_heigth+(offset_y); j++)
			{
				//over every color channel
				for(unsigned int c=0; c<c_limit; c++)
				{
					//erosion: minimum of (pixel - kernel) over the window.
					//BUGFIX: negate through int; '-offset_x' on an unsigned
					//operand wraps to a huge value before the conversion.
					for(int k=-(int)offset_x; k<=(int)offset_x; ++k)
					{
						for(int l=-(int)offset_y; l<=(int)offset_y; ++l)
						{
							temporary->getPixelFast(&color_, (unsigned int)((int)i+k),
									(unsigned int)((int)j+l));
							filter_value = mp_filter_mask->getData((unsigned int)((int)offset_x+k),
									(unsigned int)((int)offset_y+l) );

							if( (double)(color_.getElement(c)) - filter_value < temp_value )
							{
								temp_value = (double)(color_.getElement(c))-filter_value;
							}
						}
					}

					new_color.setElement(c,(T)std::min(std::max(temp_value,(double)min),clamping_val));
					temp_value=dmax;//reset
				}

				//set the alpha value to the image value (color_ holds the last
				//sampled window pixel at this point)
				if(channel_count>3)
				{
					new_color.setElement(3,color_.getElement(3));
				}

				output->setPixelFast(&new_color,i-(offset_x),j-(offset_y));
			}
		}

		//delete the temporary image
		delete temporary;

		return output;
	}

	/**
	 * We assume a kernel with odd dimensions. The erosion will be computed on an extended image with black borders
	 * such that the kernel can be positioned onto the first image pixel.
	 * Binary erosion: a pixel becomes 'white' only if EVERY pixel under the
	 * kernel equals the corresponding kernel value, otherwise 0.
	 * Returns the filtered image in case of success otherwise NULL.
	 **/
	Lazarus::Image<T>* filterImageBW( Lazarus::Image<T>* image, double white=255.0 )
	{
		unsigned int offset_x = (mp_filter_mask->getColumnCount()-1)/2;
		unsigned int offset_y = (mp_filter_mask->getRowCount()-1)/2;
		unsigned int image_width = image->getm_width();
		unsigned int image_heigth = image->getm_height();
		unsigned int channel_count = image->getm_channel_count();
		unsigned int filter_width = mp_filter_mask->getColumnCount();
		unsigned int filter_height = mp_filter_mask->getRowCount();

		if(filter_width % 2 != 1)
		{
			printf("filter width %d is not odd\n",filter_width);
			return NULL;
		}
		if(filter_height % 2 != 1)
		{
			printf("filter height %d is not odd\n",filter_height);
			return NULL;
		}

		Lazarus::Image<T>* output = new Lazarus::Image<T>( image_width, image_heigth, image->getm_data_alignment() );
		Lazarus::Image<T>* temporary = new Lazarus::Image<T>( image_width + 2*offset_x,
				image_heigth + 2*offset_y, image->getm_data_alignment() );

		//fill the output and temp image with black
		Lazarus::FastKTuple<T> color(channel_count);
		for(unsigned int i=0; i< channel_count; i++)
		{
			color.setElement(i,0);
		}
		output->fillImageFast( &color );
		temporary->fillImageFast( &color );

		//copy the input image into the temp buffer;
		for(unsigned int i=0; i<image_width; i++)
		{
			for(unsigned int j=0; j<image_heigth; j++)
			{
				image->getPixelFast( &color,i,j );
				temporary->setPixelFast(&color,offset_x + i,offset_y + j);
			}
		}

		//start the erosion process over every pixel
		#pragma omp parallel for
		for(unsigned int i=offset_x; i<image_width+(offset_x); i++)
		{
			bool match = true;
			double filter_value = 0;
			Lazarus::FastKTuple<T> new_color(channel_count);
			Lazarus::FastKTuple<T> color_(channel_count);
			//BUGFIX: was std::max, which reads past the last channel for
			//images with fewer than 3 channels; mirror filterImage's logic
			//of eroding at most the first 3 channels.
			unsigned int c_limit = std::min(channel_count,(unsigned int)3);

			for(unsigned int j=offset_y; j<image_heigth+(offset_y); j++)
			{
				//over every color channel
				for(unsigned int c=0; c<c_limit; c++)
				{
					//erosion: all window pixels must equal the kernel
					for(int k=-(int)offset_x; k<=(int)offset_x; ++k)
					{
						for(int l=-(int)offset_y; l<=(int)offset_y; ++l)
						{
							temporary->getPixelFast(&color_, (unsigned int)((int)i+k),
									(unsigned int)((int)j+l));
							filter_value = mp_filter_mask->getData((unsigned int)((int)offset_x+k),
									(unsigned int)((int)offset_y+l) );

							if( color_.getElement(c) != filter_value )
							{
								match = false;
								break;
							}
						}
						//BUGFIX: early break must trigger on a MISMATCH; the
						//original broke when match was still true, so only the
						//first kernel column was ever checked.
						if(match == false)
						{
							break;
						}
					}

					if(match == true)
					{
						new_color.setElement(c,(T)white);
					}
					else
					{
						//BUGFIX: explicitly reset to background; new_color is
						//reused across pixels, so the element otherwise kept
						//the value of the previously processed pixel.
						new_color.setElement(c,(T)0);
					}
					match=true;//reset
				}

				//set the alpha value to the image value
				if(channel_count>3)
				{
					new_color.setElement(3,color_.getElement(3));
				}

				output->setPixelFast(&new_color,i-(offset_x),j-(offset_y));
			}
		}

		//delete the temporary image
		delete temporary;

		return output;
	}

private:
	//structuring element; not owned by this object
	const Lazarus::Matrix2<double>* mp_filter_mask;
};
}
#endif /* IMAGEPROCESSING_EROSIONFILTER_H_ */
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values (GNU libc manual
 * idiom). Note that *y is normalized (modified) in the process.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that x->tv_usec - y->tv_usec falls into a sane range. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* tv_usec is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1 3D 7-point variable-coefficient stencil.
 * Usage: ./exe <Nx> <Ny> <Nz> [Nt]; defaults are used when arguments
 * are missing. Runs the kernel TESTS times and reports the best time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* BUGFIX: the sizes were read uninitialized (undefined behavior) when
   * fewer than 3 (or 4) arguments were given; provide defaults.
   * Each dimension carries +2 for the halo layers. */
  Nx = Ny = Nz = 32 + 2;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds the two time planes, coef the 7 coefficients
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  /* BUGFIX: initialize BOTH time planes over the FULL index range.
   * Previously indices started at 1 and only plane 0 was filled, so the
   * stencil read uninitialized halo values (e.g. A[t%2][i-1][j][k] at
   * i==1, and all of A[1]'s halo at odd t) — undefined behavior. */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* NOTE(review): omp_get_max_threads() needs <omp.h>; presumably
   * print_utils.h provides it when _OPENMP is set — confirm. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUGFIX: was `min(...)`, which is undefined — the file defines the
     * uppercase MIN macro. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  /* BUGFIX: tile_size was leaked. */
  free(tile_size);

  return 0;
}
|
vecnn.h |
#ifndef _nnvec_h
#define _nnvec_h
#include "allegro_emu.h"
#include <float.h>
#include "nn.h"
/* -------------------------------------------------------------------------
Randomized NN algorithm, on vector input (patches, SIFT descriptors, etc)
-------------------------------------------------------------------------
Descriptors are vectors of type T and dimension n.
Note that ACCUM must be a signed type, and must be large enough to hold the distance between any two descriptors. */
// data is represented row by row (different from matlab)
// w * h * n: (x, y, z) -> x + y * w + z * h * w
// Dense w x h map of n-dimensional descriptors; element (x, y) occupies n
// consecutive values starting at data[(y*w+x)*n].
template<class T>
class VECBITMAP {
public:
  T *data;
  int w, h, n;
  T *get(int x, int y) { return &data[(y*w+x)*n]; }  /* Get patch (x, y). */
  T *line_n1(int y) { return &data[y*w]; }           /* Get line y assuming n=1. */
  /* BUGFIX: the default constructor left all members uninitialized, so
   * destroying a default-constructed instance ran delete[] on an
   * indeterminate pointer (undefined behavior). delete[] NULL is a no-op. */
  VECBITMAP() { data = NULL; w = h = n = 0; }
  VECBITMAP(int w_, int h_, int n_) { w = w_; h = h_; n = n_; data = new T[w*h*n]; }
  ~VECBITMAP() { delete[] data; }
};
// Fill every element of the bitmap (all channels of all pixels) with c.
template<class T>
void clear_to_color(VECBITMAP<T> *bmp, T c) {
  const int total = bmp->w * bmp->h * bmp->n;
  for (int idx = 0; idx < total; idx++) {
    bmp->data[idx] = c;
  }
}
/* Largest representable value of T, used as the "infinite distance" sentinel
   when initializing NN distance maps. The generic template aborts: only the
   explicitly specialized types below are supported. */
template<class T>
T get_maxval() { fprintf(stderr, "get_maxval of unsupported template type\n"); exit(1); }

/* Specializations (defined in the corresponding .cpp). */
template<> int get_maxval();
template<> float get_maxval();
template<> double get_maxval();
template<> long long get_maxval();
#include "vecpatch.h"
// Size-only stand-in for a descriptor map: callers that only read w/h can
// treat it as a BITMAP.
template<class T>
inline BITMAP *wrap_vecbitmap(VECBITMAP<T> *a) {
  BITMAP *shell = new BITMAP(a->w, a->h); // no memory allocated for per pixel data
  return shell;
}
// nothing special here, just re-use init_nn to assign a random NN field
// nothing special here, just re-use init_nn to assign a random NN field
template<class T>
BITMAP *vec_init_nn(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *bmask=NULL, RegionMasks *region_masks=NULL, RegionMasks *amask=NULL) {
  /* Wrap both descriptor maps in size-only bitmaps and delegate
     (last argument trim_patch = 0: descriptors are per-pixel). */
  BITMAP *wrapped_a = wrap_vecbitmap(a);
  BITMAP *wrapped_b = wrap_vecbitmap(b);
  BITMAP *field = init_nn(p, wrapped_a, wrapped_b, bmask, region_masks, amask, 0);
  delete wrapped_b;
  delete wrapped_a;
  return field;
}
// Bounding box of valid source positions in a, computed via get_abox on a
// temporary size-only bitmap wrapper.
template<class T>
Box get_abox_vec(Params *p, VECBITMAP<T> *a, RegionMasks *amask, int trim_patch=1) {
  BITMAP *shell = wrap_vecbitmap(a);
  const Box result = get_abox(p, shell, amask, trim_patch);
  delete shell;
  return result;
}
/* Compute the initial per-pixel NN distance map for the field `ann`
   (descriptor mode, single-pixel descriptors). Masked-out or cross-region
   pixels keep/get the maxval sentinel. IS_MASK/IS_WINDOW compile the mask
   and window-constraint checks in or out. Caller owns the returned map. */
template<class T, class ACCUM, int IS_MASK, int IS_WINDOW>
VECBITMAP<ACCUM> *vec_init_dist_n(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *ann, BITMAP *bmask=NULL, RegionMasks *region_masks=NULL, RegionMasks *amask=NULL) {
  VECBITMAP<ACCUM> *ans = new VECBITMAP<ACCUM>(a->w, a->h, 1); // the third dimension only has one element
  /* Pre-fill with the "infinite distance" sentinel. */
  ACCUM maxval = get_maxval<ACCUM>();
  for (int y = 0; y < ans->h; y++) {
    ACCUM *row = ans->line_n1(y);
    for (int x = 0; x < ans->w; x++) {
      row[x] = maxval;
    }
  }
  /* Region masks must cover both images exactly. */
  if (region_masks) {
    if (region_masks->bmp->w != a->w || region_masks->bmp->h != a->h) { fprintf(stderr, "region_masks (%dx%d) size != a (%dx%d) size\n", region_masks->bmp->w, region_masks->bmp->h, a->w, a->h); exit(1); }
    if (region_masks->bmp->w != b->w || region_masks->bmp->h != b->h) { fprintf(stderr, "region_masks (%dx%d) size != b (%dx%d) size\n", region_masks->bmp->w, region_masks->bmp->h, b->w, b->h); exit(1); }
  }
  Box box = get_abox_vec(p, a, amask, 0);
  #pragma omp parallel for schedule(static, 4)
  for (int y = box.ymin; y < box.ymax; y++) {
    ACCUM *row = (ACCUM *) ans->line_n1(y);
    int *arow = amask ? (int *) amask->bmp->line[y]: NULL;
    for (int x = box.xmin; x < box.xmax; x++) {
      if (IS_MASK && amask && arow[x]) { continue; }   /* masked source pixel */
      int xp, yp;
      getnn(ann, x, y, xp, yp);
      /* NN target must lie in the same region as the source pixel. */
      if (IS_MASK && region_masks && ((int *) region_masks->bmp->line[y])[x] != ((int *) region_masks->bmp->line[yp])[xp]) {
        row[x] = maxval; continue;
      }
      // <!-- XC, change here to handle a real patch!, refer to init_nn_dist
      T *apatch = a->get(x, y);
      if (IS_MASK && bmask && ((int *) bmask->line[yp])[xp]) { row[x] = maxval; continue; }
      T *bpatch = b->get(xp, yp);
      row[x] = vec_fast_patch_nobranch<T, ACCUM, IS_WINDOW>(apatch, bpatch, p);
      // XC -->
      //if (x == 1 && y == 1) { printf("1, 1 => %d, %d (%d)\n", xp, yp, row[x]); }
    }
  }
  return ans;
}
// Dispatch to the vec_init_dist_n instantiation <IS_MASK, IS_WINDOW> that
// matches the current window/mask configuration.
template<class T, class ACCUM>
VECBITMAP<ACCUM> *vec_init_dist(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *ann, BITMAP *bmask=NULL, RegionMasks *region_masks=NULL, RegionMasks *amask=NULL) {
  const bool masked = (amask != NULL) || (bmask != NULL) || (region_masks != NULL);
  if (is_window(p)) {
    return vec_init_dist_n<T, ACCUM, 1, 1>(p, a, b, ann, bmask, region_masks, amask);
  }
  if (masked) {
    return vec_init_dist_n<T, ACCUM, 1, 0>(p, a, b, ann, bmask, region_masks, amask);
  }
  return vec_init_dist_n<T, ACCUM, 0, 0>(p, a, b, ann, bmask, region_masks, amask);
}
// Forward to window_constraint(); it only inspects bitmap dimensions, so
// stack-local size-only BITMAP shells are sufficient.
template<class T>
int window_constraint_wrap(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, int ax, int ay, int bx, int by) {
  BITMAP awrap, bwrap;
  awrap.w = a->w;
  awrap.h = a->h;
  bwrap.w = b->w;
  bwrap.h = b->h;
  return window_constraint(p, &awrap, &bwrap, ax, ay, bx, by);
}
// <!-- XC, this is the part to change!!!
// XC -->
/* Core PatchMatch iteration in descriptor mode: improves the NN field `ann`
   and distance map `annd` in place over p->nn_iters passes. Each pass
   serpentines over the image (direction alternates per iteration), doing
   propagation from already-visited neighbors plus exponentially shrinking
   random search. Work is split into horizontal stripes, one per thread. */
template<class T, class ACCUM, int IS_MASK, int IS_WINDOW>
void vec_nn_n(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b,
              BITMAP *ann, VECBITMAP<ACCUM> *annd,
              RegionMasks *amask=NULL, BITMAP *bmask=NULL,
              int level=0, int em_iter=0, RecomposeParams *rp=NULL, int offset_iter=0, int update_type=0, int cache_b=0,
              RegionMasks *region_masks=NULL, int tiles=-1) {
  if (tiles < 0) { tiles = p->cores; }
  printf("in vec_nn_n, masks are: %p %p %p, tiles=%d, rs_max=%d\n", amask, bmask, region_masks, tiles, p->rs_max);
  Box box = get_abox_vec(p, a, amask, 0);
  int nn_iter = 0;
  for (; nn_iter < p->nn_iters; nn_iter++) {
    unsigned int iter_seed = rand();   /* per-iteration seed, mixed per pixel below */

    #pragma omp parallel num_threads(tiles)
    {
#if USE_OPENMP
      int ithread = omp_get_thread_num();
#else
      int ithread = 0;
#endif
      /* Each thread owns a horizontal stripe [ymin, ymax) of the box. */
      int xmin = box.xmin, xmax = box.xmax;
      int ymin = box.ymin + (box.ymax-box.ymin)*ithread/tiles;
      int ymax = box.ymin + (box.ymax-box.ymin)*(ithread+1)/tiles;

      // from top left
      int xstart = xmin, xfinal = xmax, xchange=1;
      int ystart = ymin, yfinal = ymax, ychange=1;
      /* Odd iterations scan bottom-right to top-left so propagation can
         flow in both directions across iterations. */
      if ((nn_iter + offset_iter) % 2 == 1) {
        xstart = xmax-1; xfinal = xmin-1; xchange=-1;
        ystart = ymax-1; yfinal = ymin-1; ychange=-1;
      }
      int dx = -xchange, dy = -ychange;   /* offset to the already-visited neighbor */

      int bew = b->w, beh = b->h;
      int max_mag = max(b->w, b->h);
      /* rs_iters has a fractional part realized probabilistically below. */
      int rs_ipart = int(p->rs_iters);
      double rs_fpart = p->rs_iters - rs_ipart;
      int rs_max = p->rs_max;
      if (rs_max > max_mag) { rs_max = max_mag; }

      for (int y = ystart; y != yfinal; y += ychange) {
        ACCUM *annd_row = annd->line_n1(y);
        int *amask_row = IS_MASK ? (amask ? (int *) amask->bmp->line[y]: NULL): NULL;
        for (int x = xstart; x != xfinal; x += xchange) {
          if (IS_MASK && amask && amask_row[x]) { continue; }

          T *apatch = a->get(x, y);
          int src_mask = IS_MASK ? (region_masks ? ((int *) region_masks->bmp->line[y])[x]: 0): 0;

          int xbest, ybest;
          getnn(ann, x, y, xbest, ybest);
          ACCUM err = annd_row[x];
          if (!err) { continue; }   /* already a perfect match */

          /* Propagate */
          if (p->do_propagate) {
            /* Propagate x */
            if ((unsigned) (x+dx) < (unsigned) (ann->w)) {
              int xpp, ypp;
              getnn(ann, x+dx, y, xpp, ypp);
              xpp -= dx;   /* shift the neighbor's match back to this pixel */

              if (!IS_WINDOW || window_constraint_wrap(p, a, b, x, y, xpp, ypp)) {
                vec_attempt_n<T, ACCUM, IS_MASK, IS_WINDOW>(err, xbest, ybest, apatch, b, xpp, ypp, bmask, region_masks, src_mask, p);
              }
            }

            /* Propagate y */
            if ((unsigned) (y+dy) < (unsigned) (ann->h)) {
              int xpp, ypp;
              getnn(ann, x, y+dy, xpp, ypp);
              ypp -= dy;

              if (!IS_WINDOW || window_constraint_wrap(p, a, b, x, y, xpp, ypp)) {
                vec_attempt_n<T, ACCUM, IS_MASK, IS_WINDOW>(err, xbest, ybest, apatch, b, xpp, ypp, bmask, region_masks, src_mask, p);
              }
            }
          }

          /* Random search */
          /* Deterministic per-pixel hash seed, decorrelated per iteration. */
          unsigned int seed = (x | (y<<11)) ^ iter_seed;
          seed = RANDI(seed);
          int rs_iters = 1-(seed*(1.0/(RAND_MAX-1))) < rs_fpart ? rs_ipart + 1: rs_ipart;
          // int rs_iters = 1-random() < rs_fpart ? rs_ipart + 1: rs_ipart;

          int rs_max_curr = rs_max;
          /* Sample candidates in windows of exponentially decreasing radius. */
          for (int mag = rs_max_curr; mag >= p->rs_min; mag = int(mag*p->rs_ratio)) {
            for (int rs_iter = 0; rs_iter < rs_iters; rs_iter++) {
              int xmin = max(xbest-mag,0), xmax = min(xbest+mag+1,bew);
              int ymin = max(ybest-mag,0), ymax = min(ybest+mag+1,beh);
              seed = RANDI(seed);
              int xpp = xmin+seed%(xmax-xmin);
              seed = RANDI(seed);
              int ypp = ymin+seed%(ymax-ymin);
              if (!IS_WINDOW || window_constraint_wrap(p, a, b, x, y, xpp, ypp)) {
                vec_attempt_n<T, ACCUM, IS_MASK, IS_WINDOW>(err, xbest, ybest, apatch, b, xpp, ypp, bmask, region_masks, src_mask, p);
              }
            }
          }

          ((int *) ann->line[y])[x] = XY_TO_INT(xbest, ybest);
          annd_row[x] = err;
        } // x
      } // y
    } // parallel
  } // nn_iter
  printf("done vec_nn_n, %d iters, rs_max=%d\n", nn_iter, p->rs_max);
}
// <!-- XC, might need to consider the last two parameter as nn(...)
/*
void nn(Params *p, BITMAP *a, BITMAP *b,
BITMAP *ann, BITMAP *annd,
RegionMasks *amask=NULL, BITMAP *bmask=NULL,
int level=0, int em_iter=0, RecomposeParams *rp=NULL, int offset_iter=0, int update_type=0, int cache_b=0,
RegionMasks *region_masks=NULL, int tiles=-1, BITMAP *ann_window=NULL, BITMAP *awinsize=NULL);
*/
// XC -->
// Front-end for vec_nn_n: selects the template instantiation matching the
// window/mask configuration; only the CPU algorithms are supported.
template<class T, class ACCUM>
void vec_nn(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b,
            BITMAP *ann, VECBITMAP<ACCUM> *annd,
            RegionMasks *amask=NULL, BITMAP *bmask=NULL,
            int level=0, int em_iter=0, RecomposeParams *rp=NULL, int offset_iter=0, int update_type=0, int cache_b=0,
            RegionMasks *region_masks=NULL, int tiles=-1)
{
  if (p->algo != ALGO_CPU && p->algo != ALGO_CPUTILED) {
    fprintf(stderr, "vec_nn: algorithm %d unsupported\n", p->algo); exit(1);
  }

  if (is_window(p)) {
    printf("Running vec_nn (cputiled), using windowed and masked\n");
    vec_nn_n<T, ACCUM, 1, 1>(p, a, b, ann, annd, amask, bmask, level, em_iter, rp, offset_iter, update_type, cache_b, region_masks, tiles);
  } else if (bmask == NULL && amask == NULL && region_masks == NULL) {
    printf("Running vec_nn (cputiled), using unmasked\n");
    vec_nn_n<T, ACCUM, 0, 0>(p, a, b, ann, annd, amask, bmask, level, em_iter, rp, offset_iter, update_type, cache_b, region_masks, tiles);
  } else {
    printf("Running vec_nn (cputiled), using masked\n");
    vec_nn_n<T, ACCUM, 1, 0>(p, a, b, ann, annd, amask, bmask, level, em_iter, rp, offset_iter, update_type, cache_b, region_masks, tiles);
  }
}
/* Merge a previous (e.g. coarser-level) NN field into the current one: for
   each pixel, keep whichever of (current match, ann_prev's match) has the
   smaller patch distance, then run a short vec_nn refinement pass using
   rp->minnn_optp_* parameters. */
template<class T, class ACCUM, int IS_WINDOW, int HAS_MASKS>
void vec_minnn_n(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *ann, VECBITMAP<ACCUM> *annd, BITMAP *ann_prev, BITMAP *bmask, int level, int em_iter, RecomposeParams *rp, RegionMasks *region_masks, RegionMasks *amask, int ntiles) {
  if (ntiles < 0) { ntiles = p->cores; }
  printf("vec_minnn: %d %d %d %d, tiles=%d\n", ann->w, ann->h, ann_prev->w, ann_prev->h, ntiles);
  if (!rp) { fprintf(stderr, "vec_minnn_n: rp is NULL\n"); exit(1); }
  // double start_t = accurate_timer();
  Box box = get_abox_vec(p, a, amask, 0);
  #pragma omp parallel for schedule(static,4) num_threads(ntiles)
  for (int y = box.ymin; y < box.ymax; y++) {
    int *amask_row = amask ? (int *) amask->bmp->line[y]: NULL;
    ACCUM *annd_row = (ACCUM *) annd->line_n1(y);
    for (int x = box.xmin; x < box.xmax; x++) {
      if (HAS_MASKS && amask && amask_row[x]) { continue; }
      ACCUM dcurrent = annd_row[x];
      int xp, yp;
      getnn(ann_prev, x, y, xp, yp);
      /* Unsigned trick rejects negative and out-of-range coordinates at once. */
      if ((unsigned) xp >= (unsigned) (b->w) ||
          (unsigned) yp >= (unsigned) (b->h)) { continue; }
      if (HAS_MASKS && bmask && ((int *) bmask->line[yp])[xp]) { continue; }
      // <!-- XC, some newer patch distance for discriptor mode has to be used here (add code to vecpatch.h)
      //int dprev = patch_dist(p, a, x, y, b, xp, yp, dcurrent, region_masks);
      ACCUM dprev = vec_patch_dist_ab<T, ACCUM, IS_WINDOW, HAS_MASKS>(p, a, x, y, b, xp, yp, dcurrent, region_masks);
      // XC -->
      /* Adopt the previous field's match only if it is strictly better. */
      if (dprev < dcurrent) {
        _putpixel32(ann, x, y, XY_TO_INT(xp, yp));
        annd_row[x] = dprev;
      }
    }
  }
  // nn_time += accurate_timer() - start_t;
  /* Short refinement pass with the dedicated minnn parameters. */
  Params pcopy(*p);
  pcopy.nn_iters = rp->minnn_optp_nn_iters;
  pcopy.rs_max = rp->minnn_optp_rs_max;
  vec_nn<T, ACCUM>(&pcopy, a, b, ann, annd, amask, bmask, level, em_iter, rp, 0, 0, 1, region_masks, ntiles);
}
// Dispatch to the vec_minnn_n instantiation <IS_WINDOW, HAS_MASKS> matching
// the current configuration.
template<class T, class ACCUM>
void vec_minnn(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *ann, VECBITMAP<ACCUM> *annd, BITMAP *ann_prev, BITMAP *bmask, int level, int em_iter, RecomposeParams *rp, RegionMasks *region_masks, RegionMasks *amask, int ntiles) {
  const bool any_mask = (bmask != NULL) || (region_masks != NULL) || (amask != NULL);
  if (is_window(p)) {
    return vec_minnn_n<T, ACCUM, 1, 1>(p, a, b, ann, annd, ann_prev, bmask, level, em_iter, rp, region_masks, amask, ntiles);
  }
  if (any_mask) {
    return vec_minnn_n<T, ACCUM, 0, 1>(p, a, b, ann, annd, ann_prev, bmask, level, em_iter, rp, region_masks, amask, ntiles);
  }
  return vec_minnn_n<T, ACCUM, 0, 0>(p, a, b, ann, annd, ann_prev, bmask, level, em_iter, rp, region_masks, amask, ntiles);
}
// --------------------------------
// XC version of discriptor mode
// --------------------------------
// Same as vec_init_nn but with trim_patch = 1: patch-based descriptor mode
// must keep NN targets at least one patch width away from the border.
template<class T>
BITMAP *XCvec_init_nn(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *bmask=NULL, RegionMasks *region_masks=NULL, RegionMasks *amask=NULL) {
  BITMAP *wrapped_a = wrap_vecbitmap(a);
  BITMAP *wrapped_b = wrap_vecbitmap(b);
  BITMAP *field = init_nn(p, wrapped_a, wrapped_b, bmask, region_masks, amask, 1); // trim patch!!!
  delete wrapped_b;
  delete wrapped_a;
  return field;
}
/* Patch-based variant of vec_init_dist_n: the distance at (x, y) is computed
   over a PATCH_W x PATCH_W window of descriptors instead of a single one.
   The box is trimmed so that every aggregated patch stays inside `a`.
   Caller owns the returned map. */
template<class T, class ACCUM, int IS_MASK, int IS_WINDOW, int PATCH_W>
VECBITMAP<ACCUM> *XCvec_init_dist_n(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *ann, BITMAP *bmask=NULL, RegionMasks *region_masks=NULL, RegionMasks *amask=NULL)
{
  VECBITMAP<ACCUM> *ans = new VECBITMAP<ACCUM>(a->w, a->h, 1); // the third dimension only has one element
  // set all distances to a large number
  ACCUM maxval = get_maxval<ACCUM>();
  for (int y = 0; y < ans->h; y++) {
    ACCUM *row = ans->line_n1(y);
    for (int x = 0; x < ans->w; x++) {
      row[x] = maxval;
    }
  }
  /* Region masks must cover both images exactly. */
  if (region_masks) {
    if (region_masks->bmp->w != a->w || region_masks->bmp->h != a->h) { fprintf(stderr, "region_masks (%dx%d) size != a (%dx%d) size\n", region_masks->bmp->w, region_masks->bmp->h, a->w, a->h); exit(1); }
    if (region_masks->bmp->w != b->w || region_masks->bmp->h != b->h) { fprintf(stderr, "region_masks (%dx%d) size != b (%dx%d) size\n", region_masks->bmp->w, region_masks->bmp->h, b->w, b->h); exit(1); }
  }
  Box box = get_abox_vec(p, a, amask, 1); // need to trim patch!!!
  #pragma omp parallel for schedule(static, 4)
  for (int y = box.ymin; y < box.ymax; y++) {
    T *adata[PATCH_W][PATCH_W]; // aggregate a patch of vectors
    ACCUM *row = (ACCUM *) ans->line_n1(y);
    int *arow = amask ? (int *) amask->bmp->line[y]: NULL;
    for (int x = box.xmin; x < box.xmax; x++) {
      if (IS_MASK && amask && arow[x]) { continue; }   /* masked source pixel */
      int xp, yp;
      getnn(ann, x, y, xp, yp);
      /* NN target must lie in the same region as the source pixel. */
      if (IS_MASK && region_masks && ((int *) region_masks->bmp->line[y])[x] != ((int *) region_masks->bmp->line[yp])[xp]) {
        continue;
      }
      /* Collect pointers to the PATCH_W x PATCH_W descriptor window at (x, y). */
      for (int dy = 0; dy < PATCH_W; dy++) {
        for (int dx = 0; dx < PATCH_W; dx++) {
          adata[dy][dx] = a->get(x+dx, y+dy);
        }
      }
      if (IS_MASK && bmask && ((int *) bmask->line[yp])[xp]) { continue; }
      row[x] = XCvec_fast_patch_nobranch<T, ACCUM, IS_WINDOW, PATCH_W>(adata, b, xp, yp, p);
      //if (x == 1 && y == 1) { printf("1, 1 => %d, %d (%d)\n", xp, yp, row[x]); }
    }
  }
  return ans;
}
/* Runtime-to-compile-time dispatcher for XCvec_init_dist_n: selects the
 * template instantiation matching the requested mode (windowed / masked /
 * plain) and patch width 1..32, then returns its distance field.
 * Unsupported patch sizes abort with a diagnostic, as before. */
template<class T, class ACCUM>
VECBITMAP<ACCUM> *XCvec_init_dist(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *ann, BITMAP *bmask=NULL, RegionMasks *region_masks=NULL, RegionMasks *amask=NULL) {
    VECBITMAP<ACCUM> *ans = NULL;
    /* One switch case per supported compile-time patch width. */
#define XCVEC_INIT_CASE(MASKED, WINDOWED, PW) \
    case PW: ans = XCvec_init_dist_n<T, ACCUM, MASKED, WINDOWED, PW>(p, a, b, ann, bmask, region_masks, amask); break;
#define XCVEC_INIT_SWITCH(MASKED, WINDOWED) \
    switch (p->patch_w) { \
        XCVEC_INIT_CASE(MASKED, WINDOWED, 1)  XCVEC_INIT_CASE(MASKED, WINDOWED, 2)  XCVEC_INIT_CASE(MASKED, WINDOWED, 3)  XCVEC_INIT_CASE(MASKED, WINDOWED, 4)  \
        XCVEC_INIT_CASE(MASKED, WINDOWED, 5)  XCVEC_INIT_CASE(MASKED, WINDOWED, 6)  XCVEC_INIT_CASE(MASKED, WINDOWED, 7)  XCVEC_INIT_CASE(MASKED, WINDOWED, 8)  \
        XCVEC_INIT_CASE(MASKED, WINDOWED, 9)  XCVEC_INIT_CASE(MASKED, WINDOWED, 10) XCVEC_INIT_CASE(MASKED, WINDOWED, 11) XCVEC_INIT_CASE(MASKED, WINDOWED, 12) \
        XCVEC_INIT_CASE(MASKED, WINDOWED, 13) XCVEC_INIT_CASE(MASKED, WINDOWED, 14) XCVEC_INIT_CASE(MASKED, WINDOWED, 15) XCVEC_INIT_CASE(MASKED, WINDOWED, 16) \
        XCVEC_INIT_CASE(MASKED, WINDOWED, 17) XCVEC_INIT_CASE(MASKED, WINDOWED, 18) XCVEC_INIT_CASE(MASKED, WINDOWED, 19) XCVEC_INIT_CASE(MASKED, WINDOWED, 20) \
        XCVEC_INIT_CASE(MASKED, WINDOWED, 21) XCVEC_INIT_CASE(MASKED, WINDOWED, 22) XCVEC_INIT_CASE(MASKED, WINDOWED, 23) XCVEC_INIT_CASE(MASKED, WINDOWED, 24) \
        XCVEC_INIT_CASE(MASKED, WINDOWED, 25) XCVEC_INIT_CASE(MASKED, WINDOWED, 26) XCVEC_INIT_CASE(MASKED, WINDOWED, 27) XCVEC_INIT_CASE(MASKED, WINDOWED, 28) \
        XCVEC_INIT_CASE(MASKED, WINDOWED, 29) XCVEC_INIT_CASE(MASKED, WINDOWED, 30) XCVEC_INIT_CASE(MASKED, WINDOWED, 31) XCVEC_INIT_CASE(MASKED, WINDOWED, 32) \
        default: fprintf(stderr, "Patch size unsupported: %d\n", p->patch_w); exit(1); \
    }
    if (is_window(p)) {
        // Windowed search implies the masked code path as well.
        XCVEC_INIT_SWITCH(1, 1)
    }
    else if (amask || bmask || region_masks) {
        // At least one mask present: masked, non-windowed.
        XCVEC_INIT_SWITCH(1, 0)
    }
    else {
        // Fast path: no masks, no window.
        XCVEC_INIT_SWITCH(0, 0)
    }
#undef XCVEC_INIT_SWITCH
#undef XCVEC_INIT_CASE
    return ans;
}
// similar to nn_n_cputiled
// Core PatchMatch iteration for vector-valued images: refines the
// nearest-neighbor field 'ann' (pixel correspondences a->b) and its distance
// field 'annd' in place, running p->nn_iters passes of propagation plus
// random search.  Each pass splits the valid box of 'a' into horizontal bands,
// one per OpenMP thread ('tiles' of them), and alternates scan direction
// between passes.  Template flags: IS_MASK enables amask/bmask/region_masks
// checks, IS_WINDOW switches propagation to the window-constrained variant,
// PATCH_W is the compile-time patch width.
// NOTE(review): band boundaries are not synchronized between threads, so
// propagation across band edges reads values that may be mid-update —
// presumably an accepted race, as in the scalar cputiled variant; confirm.
template<class T, class ACCUM, int IS_MASK, int IS_WINDOW, int PATCH_W>
void XCvec_nn_n(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b,
                BITMAP *ann, VECBITMAP<ACCUM> *annd,
                RegionMasks *amask=NULL, BITMAP *bmask=NULL,
                int level=0, int em_iter=0, RecomposeParams *rp=NULL, int offset_iter=0, int update_type=0, int cache_b=0,
                RegionMasks *region_masks=NULL, int tiles=-1)
{
    if (tiles < 0) { tiles = p->cores; } // default: one band per core
    printf("in vec_nn_n, masks are: %p %p %p, tiles=%d, rs_max=%d\n", amask, bmask, region_masks, tiles, p->rs_max);
    Box box = get_abox_vec(p, a, amask, 1); // !!! need to trim patch
    int nn_iter = 0;
    for (; nn_iter < p->nn_iters; nn_iter++) {
        // One shared seed per pass; each pixel derives its own RNG stream from it.
        unsigned int iter_seed = rand();
        #pragma omp parallel num_threads(tiles)
        {
#if USE_OPENMP
            int ithread = omp_get_thread_num();
#else
            int ithread = 0;
#endif
            // This thread's horizontal band [ymin, ymax) of the valid box.
            int xmin = box.xmin, xmax = box.xmax;
            int ymin = box.ymin + (box.ymax-box.ymin)*ithread/tiles;
            int ymax = box.ymin + (box.ymax-box.ymin)*(ithread+1)/tiles;
            int ystart = ymin, yfinal = ymax, ychange=1; // from up-left to bottom-right
            int xstart = xmin, xfinal = xmax, xchange=1;
            if ((nn_iter + offset_iter) % 2 == 1) {
                ystart = ymax-1; yfinal = ymin-1; ychange=-1; // from bottom-right to up-left
                xstart = xmax-1; xfinal = xmin-1; xchange=-1;
            }
            // Propagation looks at the neighbor *behind* the scan direction.
            int dx = -xchange, dy = -ychange;
            int bew = b->w-PATCH_W, beh = b->h-PATCH_W; // random-search bounds in 'b'
            int max_mag = max(b->w, b->h);
            // p->rs_iters may be fractional: rs_ipart iterations always, plus
            // one more with probability rs_fpart.
            int rs_ipart = int(p->rs_iters);
            double rs_fpart = p->rs_iters - rs_ipart;
            int rs_max = p->rs_max;
            if (rs_max > max_mag) { rs_max = max_mag; }
            T* adata[PATCH_W][PATCH_W]; // pointers into 'a' for the current patch
            for (int y = ystart; y != yfinal; y += ychange) {
                ACCUM *annd_row = annd->line_n1(y);
                int *amask_row = IS_MASK ? (amask ? (int *) amask->bmp->line[y]: NULL): NULL;
                for (int x = xstart; x != xfinal; x += xchange) {
                    if (IS_MASK && amask && amask_row[x]) { continue; } // pixel excluded
                    for (int dy0 = 0; dy0 < PATCH_W; dy0++) { // copy a patch from a
                        for (int dx0 = 0; dx0 < PATCH_W; dx0++) {
                            adata[dy0][dx0] = a->get(x+dx0, y+dy0);
                        }
                    }
                    int src_mask = IS_MASK ? (region_masks ? ((int *) region_masks->bmp->line[y])[x]: 0): 0;
                    int xbest, ybest;
                    getnn(ann, x, y, xbest, ybest); // current best correspondence
                    ACCUM err = annd_row[x];
                    if (err == 0) { continue; } // already a perfect match
                    /* Propagate */
                    if (p->do_propagate) {
                        if(!IS_WINDOW) {
                            /* Propagate x: try the horizontal neighbor's match,
                             * shifted back by dx.  The distance is updated
                             * incrementally: subtract the column leaving the
                             * patch, add the column entering it. */
                            if ((unsigned) (x+dx) < (unsigned) (ann->w-PATCH_W)) { // unsigned compare also rejects x+dx < 0
                                int xpp, ypp;
                                getnn(ann, x+dx, y, xpp, ypp);
                                xpp -= dx;
                                if ((xpp != xbest || ypp != ybest) &&
                                    (unsigned) xpp < (unsigned) (b->w-PATCH_W+1) &&
                                    (!IS_MASK ||
                                     ((!region_masks || ((int *) region_masks->bmp->line[ypp])[xpp] == src_mask) &&
                                      (!bmask || !((int *) bmask->line[ypp])[xpp]) &&
                                      (!amask || !((int *) amask->bmp->line[y])[x+dx]))
                                    ))
                                {
                                    ACCUM err0 = annd_row[x+dx]; // neighbor's stored distance
                                    int xa = dx, xb = 0;
                                    if (dx > 0) { xa = 0; xb = dx; }
                                    ACCUM partial = 0; // signed column difference (L2 over vec_len channels)
                                    for (int yi = 0; yi < PATCH_W; yi++) {
                                        T* c1 = a->get(x+xa, y+yi);
                                        T* c2 = b->get(xpp+xa, ypp+yi);
                                        T* c3 = a->get(x+xb+PATCH_W-1, y+yi);
                                        T* c4 = b->get(xpp+xb+PATCH_W-1, ypp+yi);
                                        for (int i = 0; i < p->vec_len; i ++) {
                                            ACCUM di12 = ((ACCUM)c1[i]) - ((ACCUM)c2[i]);
                                            ACCUM di34 = ((ACCUM)c3[i]) - ((ACCUM)c4[i]);
                                            partial += (di34*di34 - di12*di12);
                                        }
                                    }
                                    err0 += (dx < 0) ? partial: -partial; // sign depends on scan direction
                                    if (err0 < err) {
                                        err = err0;
                                        xbest = xpp;
                                        ybest = ypp;
                                    }
                                }
                            } // end of propagate x
                            /* Propagate y: same incremental scheme, row-wise. */
                            if ((unsigned) (y+dy) < (unsigned) (ann->h-PATCH_W)) {
                                int xpp, ypp;
                                getnn(ann, x, y+dy, xpp, ypp);
                                ypp -= dy;
                                if ((xpp != xbest || ypp != ybest) &&
                                    (unsigned) ypp < (unsigned) (b->h-PATCH_W+1) &&
                                    (!IS_MASK ||
                                     ((!region_masks || ((int *) region_masks->bmp->line[ypp])[xpp] == src_mask) &&
                                      (!bmask || !((int *) bmask->line[ypp])[xpp]) &&
                                      (!amask || !((int *) amask->bmp->line[y+dy])[x]))
                                    ))
                                {
                                    ACCUM err0 = annd->line_n1(y+dy)[x];
                                    int ya = dy, yb = 0;
                                    if (dy > 0) { ya = 0; yb = dy; }
                                    ACCUM partial = 0;
                                    for (int xi = 0; xi < PATCH_W; xi++) {
                                        T* c1 = a->get(x+xi, y+ya);
                                        T* c2 = b->get(xpp+xi, ypp+ya);
                                        T* c3 = a->get(x+xi, y+yb+PATCH_W-1);
                                        T* c4 = b->get(xpp+xi, ypp+yb+PATCH_W-1);
                                        for (int i = 0; i < p->vec_len; i ++) {
                                            ACCUM di12 = ((ACCUM)c1[i]) - ((ACCUM)c2[i]);
                                            ACCUM di34 = ((ACCUM)c3[i]) - ((ACCUM)c4[i]);
                                            partial += di34*di34 - di12*di12;
                                        }
                                    }
                                    err0 += (dy < 0) ? partial: -partial;
                                    if (err0 < err) {
                                        err = err0;
                                        xbest = xpp;
                                        ybest = ypp;
                                    }
                                }
                            } // end of progagate y
                        } // end of IS_WINDOW = false
                        else { // IS_WINDOW = true
                            /* Windowed variant: no incremental trick; candidates
                             * go through the window constraint and a full
                             * patch-distance attempt. */
                            /* Propagate x */
                            if ((unsigned) (x+dx) < (unsigned) (ann->w-PATCH_W)) {
                                int xpp, ypp;
                                getnn(ann, x+dx, y, xpp, ypp);
                                xpp -= dx;
                                if (!IS_WINDOW || window_constraint_wrap(p, a, b, x, y, xpp, ypp)) {
                                    XCvec_attempt_n<T, ACCUM, IS_MASK, IS_WINDOW, PATCH_W>(err, xbest, ybest, adata, b, xpp, ypp, bmask, region_masks, src_mask, p);
                                }
                            }
                            /* Propagate y */
                            if ((unsigned) (y+dy) < (unsigned) (ann->h-PATCH_W)) {
                                int xpp, ypp;
                                getnn(ann, x, y+dy, xpp, ypp);
                                ypp -= dy;
                                if (!IS_WINDOW || window_constraint_wrap(p, a, b, x, y, xpp, ypp)) {
                                    XCvec_attempt_n<T, ACCUM, IS_MASK, IS_WINDOW, PATCH_W>(err, xbest, ybest, adata, b, xpp, ypp, bmask, region_masks, src_mask, p);
                                }
                            }
                        } // end of IS_WINDOW = true
                    } // end of do_propagation
                    /* Random search: sample candidates in windows of
                     * exponentially decreasing radius around the current best. */
                    unsigned int seed = (x | (y<<11)) ^ iter_seed; // per-pixel deterministic stream
                    seed = RANDI(seed);
                    int rs_iters = 1-(seed*(1.0/(RAND_MAX-1))) < rs_fpart ? rs_ipart + 1: rs_ipart;
                    // int rs_iters = 1-random() < rs_fpart ? rs_ipart + 1: rs_ipart;
                    int rs_max_curr = rs_max;
                    for (int mag = rs_max_curr; mag >= p->rs_min; mag = int(mag*p->rs_ratio)) {
                        for (int rs_iter = 0; rs_iter < rs_iters; rs_iter++) {
                            // NOTE(review): bew/beh are used as exclusive upper
                            // bounds, excluding the last valid patch origin —
                            // matches the scalar implementation; confirm intended.
                            int xmin = max(xbest-mag,0), xmax = min(xbest+mag+1,bew);
                            int ymin = max(ybest-mag,0), ymax = min(ybest+mag+1,beh);
                            seed = RANDI(seed);
                            int xpp = xmin+seed%(xmax-xmin);
                            seed = RANDI(seed);
                            int ypp = ymin+seed%(ymax-ymin);
                            if (!IS_WINDOW || window_constraint_wrap(p, a, b, x, y, xpp, ypp)) {
                                XCvec_attempt_n<T, ACCUM, IS_MASK, IS_WINDOW, PATCH_W>(err, xbest, ybest, adata, b, xpp, ypp, bmask, region_masks, src_mask, p);
                            }
                        }
                    }
                    // Commit the best correspondence and its distance.
                    ((int *) ann->line[y])[x] = XY_TO_INT(xbest, ybest);
                    annd_row[x] = err;
                } // x
            } // y
        } // parallel
    } // nn_iter
    printf("done vec_nn_n, %d iters, rs_max=%d\n", nn_iter, p->rs_max);
}
/* Runtime-to-compile-time dispatcher for XCvec_nn_n: picks the template
 * instantiation (windowed+masked / unmasked / masked) and the patch width
 * 1..32, then runs the PatchMatch iterations.  Only the CPU algorithms are
 * supported; anything else aborts with a diagnostic, as before. */
template<class T, class ACCUM>
void XCvec_nn(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b,
              BITMAP *ann, VECBITMAP<ACCUM> *annd,
              RegionMasks *amask=NULL, BITMAP *bmask=NULL,
              int level=0, int em_iter=0, RecomposeParams *rp=NULL, int offset_iter=0, int update_type=0, int cache_b=0,
              RegionMasks *region_masks=NULL, int tiles=-1)
{
    // Guard clause: reject unsupported algorithms up front.
    if (p->algo != ALGO_CPU && p->algo != ALGO_CPUTILED) {
        fprintf(stderr, "vec_nn: algorithm %d unsupported\n", p->algo); exit(1);
    }
    /* One switch case per supported compile-time patch width. */
#define XCVEC_NN_CASE(M, W, PW) \
    case PW: XCvec_nn_n<T, ACCUM, M, W, PW>(p, a, b, ann, annd, amask, bmask, level, em_iter, rp, offset_iter, update_type, cache_b, region_masks, tiles); break;
#define XCVEC_NN_SWITCH(M, W) \
    switch (p->patch_w) { \
        XCVEC_NN_CASE(M, W, 1)  XCVEC_NN_CASE(M, W, 2)  XCVEC_NN_CASE(M, W, 3)  XCVEC_NN_CASE(M, W, 4)  \
        XCVEC_NN_CASE(M, W, 5)  XCVEC_NN_CASE(M, W, 6)  XCVEC_NN_CASE(M, W, 7)  XCVEC_NN_CASE(M, W, 8)  \
        XCVEC_NN_CASE(M, W, 9)  XCVEC_NN_CASE(M, W, 10) XCVEC_NN_CASE(M, W, 11) XCVEC_NN_CASE(M, W, 12) \
        XCVEC_NN_CASE(M, W, 13) XCVEC_NN_CASE(M, W, 14) XCVEC_NN_CASE(M, W, 15) XCVEC_NN_CASE(M, W, 16) \
        XCVEC_NN_CASE(M, W, 17) XCVEC_NN_CASE(M, W, 18) XCVEC_NN_CASE(M, W, 19) XCVEC_NN_CASE(M, W, 20) \
        XCVEC_NN_CASE(M, W, 21) XCVEC_NN_CASE(M, W, 22) XCVEC_NN_CASE(M, W, 23) XCVEC_NN_CASE(M, W, 24) \
        XCVEC_NN_CASE(M, W, 25) XCVEC_NN_CASE(M, W, 26) XCVEC_NN_CASE(M, W, 27) XCVEC_NN_CASE(M, W, 28) \
        XCVEC_NN_CASE(M, W, 29) XCVEC_NN_CASE(M, W, 30) XCVEC_NN_CASE(M, W, 31) XCVEC_NN_CASE(M, W, 32) \
        default: fprintf(stderr, "Patch size unsupported: %d\n", p->patch_w); exit(1); \
    }
    if (is_window(p)) {
        printf("Running vec_nn (cputiled), using windowed and masked\n");
        XCVEC_NN_SWITCH(1, 1)
    }
    else if (bmask == NULL && amask == NULL && region_masks == NULL) {
        printf("Running vec_nn (cputiled), using unmasked\n");
        XCVEC_NN_SWITCH(0, 0)
    }
    else {
        printf("Running vec_nn (cputiled), using masked\n");
        XCVEC_NN_SWITCH(1, 0)
    }
#undef XCVEC_NN_SWITCH
#undef XCVEC_NN_CASE
}
// One "min-NN" refinement pass, specialized at compile time on window mode
// (IS_WINDOW), mask usage (HAS_MASKS) and patch width (PATCH_W).
// For every unmasked pixel of `a` inside the valid box it re-tries the
// correspondence stored in `ann_prev`; if that match is strictly better than
// the current one in `ann`/`annd`, it is kept.  Afterwards a short follow-up
// NN optimization is run with iteration counts taken from `rp`.
// NOTE(review): `ann_prev` is dereferenced unconditionally (printf and
// getnn) — presumably callers guarantee it is non-NULL; confirm at call sites.
template<class T, class ACCUM, int IS_WINDOW, int HAS_MASKS, int PATCH_W>
void XCvec_minnn_n(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *ann, VECBITMAP<ACCUM> *annd, BITMAP *ann_prev, BITMAP *bmask, int level, int em_iter, RecomposeParams *rp, RegionMasks *region_masks, RegionMasks *amask, int ntiles) {
  // Negative tile count means "use one tile per core".
  if (ntiles < 0) { ntiles = p->cores; }
  printf("vec_minnn: %d %d %d %d, tiles=%d\n", ann->w, ann->h, ann_prev->w, ann_prev->h, ntiles);
  if (!rp) { fprintf(stderr, "vec_minnn_n: rp is NULL\n"); exit(1); }
  // double start_t = accurate_timer();
  Box box = get_abox_vec(p, a, amask, 1); // trim patch !!!
  // Rows are independent: each writes only its own annd row / ann pixels.
  #pragma omp parallel for schedule(static,4) num_threads(ntiles)
  for (int y = box.ymin; y < box.ymax; y++) {
    int *amask_row = amask ? (int *) amask->bmp->line[y]: NULL;
    ACCUM *annd_row = (ACCUM *) annd->line_n1(y);
    for (int x = box.xmin; x < box.xmax; x++) {
      // Skip pixels masked out of the source image.
      if (HAS_MASKS && amask && amask_row[x]) { continue; }
      ACCUM dcurrent = annd_row[x];
      int xp, yp;
      getnn(ann_prev, x, y, xp, yp);
      // Unsigned trick rejects both negative and >= upper-bound coordinates
      // (candidate patch must fit entirely inside b).
      if ((unsigned) xp >= (unsigned) (b->w-p->patch_w+1) ||
          (unsigned) yp >= (unsigned) (b->h-p->patch_w+1)) { continue; }
      // Skip candidates whose target pixel is masked out of b.
      if (HAS_MASKS && bmask && ((int *) bmask->line[yp])[xp]) { continue; }
      // dcurrent is passed as an early-termination bound for the distance.
      ACCUM dprev = XCvec_patch_dist_ab<T, ACCUM, IS_WINDOW, HAS_MASKS, PATCH_W>(p, a, x, y, b, xp, yp, dcurrent, region_masks);
      if (dprev < dcurrent) {
        _putpixel32(ann, x, y, XY_TO_INT(xp, yp));
        annd_row[x] = dprev;
      }
    }
  }
  // nn_time += accurate_timer() - start_t;
  // Follow-up optimization with reduced iteration budget from rp.
  Params pcopy(*p);
  pcopy.nn_iters = rp->minnn_optp_nn_iters;
  pcopy.rs_max = rp->minnn_optp_rs_max;
  XCvec_nn<T, ACCUM>(&pcopy, a, b, ann, annd, amask, bmask, level, em_iter, rp, 0, 0, 1, region_masks, ntiles);
}
// Runtime -> compile-time dispatch for the min-NN pass: selects the
// XCvec_minnn_n<...> instantiation matching the current search mode and
// patch width.  patch_w must be in [1, 32]; anything else aborts.
//
// The three modes mirror the original hand-written if/else chains:
//   - windowed search                     -> <IS_WINDOW=1, HAS_MASKS=1>
//   - any mask (bmask/region_masks/amask) -> <IS_WINDOW=0, HAS_MASKS=1>
//   - unmasked full search                -> <IS_WINDOW=0, HAS_MASKS=0>
// The 3 x 32 duplicated call sites are generated by macros instead of being
// written out by hand, removing ~100 lines of copy-paste.
template<class T, class ACCUM>
void XCvec_minnn(Params *p, VECBITMAP<T> *a, VECBITMAP<T> *b, BITMAP *ann, VECBITMAP<ACCUM> *annd, BITMAP *ann_prev, BITMAP *bmask, int level, int em_iter, RecomposeParams *rp, RegionMasks *region_masks, RegionMasks *amask, int ntiles) {
/* One case of the patch-width switch: forwards to the specialized kernel. */
#define XC_MINNN_CASE(IS_WIN, MASKS, PW) \
  case PW: return XCvec_minnn_n<T, ACCUM, IS_WIN, MASKS, PW>(p, a, b, ann, annd, ann_prev, bmask, level, em_iter, rp, region_masks, amask, ntiles);
/* Full patch-width dispatch (1..32) for one <IS_WINDOW, HAS_MASKS> mode. */
#define XC_MINNN_SWITCH(IS_WIN, MASKS) \
  switch (p->patch_w) { \
    XC_MINNN_CASE(IS_WIN, MASKS,  1) XC_MINNN_CASE(IS_WIN, MASKS,  2) XC_MINNN_CASE(IS_WIN, MASKS,  3) XC_MINNN_CASE(IS_WIN, MASKS,  4) \
    XC_MINNN_CASE(IS_WIN, MASKS,  5) XC_MINNN_CASE(IS_WIN, MASKS,  6) XC_MINNN_CASE(IS_WIN, MASKS,  7) XC_MINNN_CASE(IS_WIN, MASKS,  8) \
    XC_MINNN_CASE(IS_WIN, MASKS,  9) XC_MINNN_CASE(IS_WIN, MASKS, 10) XC_MINNN_CASE(IS_WIN, MASKS, 11) XC_MINNN_CASE(IS_WIN, MASKS, 12) \
    XC_MINNN_CASE(IS_WIN, MASKS, 13) XC_MINNN_CASE(IS_WIN, MASKS, 14) XC_MINNN_CASE(IS_WIN, MASKS, 15) XC_MINNN_CASE(IS_WIN, MASKS, 16) \
    XC_MINNN_CASE(IS_WIN, MASKS, 17) XC_MINNN_CASE(IS_WIN, MASKS, 18) XC_MINNN_CASE(IS_WIN, MASKS, 19) XC_MINNN_CASE(IS_WIN, MASKS, 20) \
    XC_MINNN_CASE(IS_WIN, MASKS, 21) XC_MINNN_CASE(IS_WIN, MASKS, 22) XC_MINNN_CASE(IS_WIN, MASKS, 23) XC_MINNN_CASE(IS_WIN, MASKS, 24) \
    XC_MINNN_CASE(IS_WIN, MASKS, 25) XC_MINNN_CASE(IS_WIN, MASKS, 26) XC_MINNN_CASE(IS_WIN, MASKS, 27) XC_MINNN_CASE(IS_WIN, MASKS, 28) \
    XC_MINNN_CASE(IS_WIN, MASKS, 29) XC_MINNN_CASE(IS_WIN, MASKS, 30) XC_MINNN_CASE(IS_WIN, MASKS, 31) XC_MINNN_CASE(IS_WIN, MASKS, 32) \
    default: fprintf(stderr, "Patch size unsupported: %d\n", p->patch_w); exit(1); \
  }
  if (is_window(p)) {
    XC_MINNN_SWITCH(1, 1)
  }
  else if (bmask || region_masks || amask) {
    XC_MINNN_SWITCH(0, 1)
  }
  else {
    XC_MINNN_SWITCH(0, 0)
  }
#undef XC_MINNN_SWITCH
#undef XC_MINNN_CASE
}
// ------------------------------
// Similarity stuff hereafter
// ------------------------------
BITMAP *vecbitmap_to_bitmap(VECBITMAP<int> *a);
#define VEC_MODE_PATCH 0
#define VEC_MODE_DESC 1
#define VEC_MODE_SIM 2
BITMAP *vecwrap_init_nn(int vec_mode, Params *p, BITMAP *a, BITMAP *b, BITMAP *bmask=NULL, RegionMasks *region_masks=NULL, RegionMasks *amask=NULL, BITMAP **ann_sim=NULL);
BITMAP *vecwrap_init_dist(int vec_mode, Params *p, BITMAP *a, BITMAP *b, BITMAP *ann, BITMAP *bmask=NULL, RegionMasks *region_masks=NULL, RegionMasks *amask=NULL, BITMAP *ann_sim=NULL);
void vecwrap_nn(int vec_mode, Params *p, BITMAP *a, BITMAP *b,
BITMAP *ann, BITMAP *annd,
RegionMasks *amask=NULL, BITMAP *bmask=NULL,
int level=0, int em_iter=0, RecomposeParams *rp=NULL, int offset_iter=0, int update_type=0, int cache_b=0,
RegionMasks *region_masks=NULL, int tiles=-1, BITMAP *ann_sim=NULL);
BITMAP *vecwrap_vote(int vec_mode, Params *p, BITMAP *b,
BITMAP *ann, BITMAP *ann_sim=NULL, BITMAP *bnn=NULL,
BITMAP *bmask=NULL, BITMAP *bweight=NULL,
double coherence_weight=COHERENCE_WEIGHT, double complete_weight=COMPLETE_WEIGHT,
RegionMasks *amask=NULL, BITMAP *aweight=NULL, BITMAP *ainit=NULL, RegionMasks *region_masks=NULL, BITMAP *aconstraint=NULL, int mask_self_only=0);
#endif
|
DCSRTile.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_DCSRTILE_H_
#define SRC_DCSRTILE_H_
#include <string>
#include <algorithm>
#include <vector>
#include "GMDP/utils/binary_search.h"
// DCSRTile: one tile of a sparse matrix in Doubly Compressed Sparse Row
// (DCSR) form.  Only rows containing at least one nonzero are stored:
// row_ids[] maps compressed row index -> actual row, ia[] gives each stored
// row's nonzero range, ja[]/a[] hold column indices and values.  Stored rows
// are additionally grouped into num_partitions contiguous chunks
// (partition_ptrs) for parallel traversal.
//
// Ownership note: operator= performs a shallow pointer copy and clear()
// drops nnz without freeing, so exactly one live tile must end up owning
// (and eventually freeing, in ~DCSRTile) each buffer set.
template <typename T>
class DCSRTile {
 public:
  std::string name;
  int m;                 // tile height (rows)
  int n;                 // tile width (columns)
  int num_rows;          // number of stored (non-empty) rows
  int nnz;               // number of nonzeros; <= 0 means "empty tile"
  int num_partitions;    // row chunks for parallel processing
  int *partition_ptrs;   // [num_partitions+1] indices into row_ids/ia
  T *a;                  // [nnz] values
  int *ja;               // [nnz] column indices (0-based within tile)
  int *ia;               // [num_rows+1] row pointers into a/ja
  int *row_ids;          // [num_rows] actual row index of each stored row

  // Boost serialization, split into save/load because load must allocate.
  friend boost::serialization::access;

  template <class Archive>
  void save(Archive &ar, const unsigned int version) const {
    ar & name;
    ar & m;
    ar & n;
    ar & num_rows;
    ar & nnz;
    ar & num_partitions;
    if (!isEmpty()) {
      for (int i = 0; i < nnz; i++) {
        ar & a[i];
      }
      for (int i = 0; i < nnz; i++) {
        ar & ja[i];
      }
      // BUGFIX: ia holds num_rows+1 entries (see the edge constructor), but
      // this loop previously ran to m+1, reading past the end of the buffer
      // whenever num_rows < m.  load() below is kept consistent.
      for (int i = 0; i < num_rows + 1; i++) {
        ar & ia[i];
      }
      for (int i = 0; i < num_rows; i++) {
        ar & row_ids[i];
      }
    }
  }

  template <class Archive>
  void load(Archive &ar, const unsigned int version) {
    ar & name;
    ar & m;
    ar & n;
    ar & num_rows;
    ar & nnz;
    ar & num_partitions;
    if (!isEmpty()) {
      a = reinterpret_cast<T*>(
          _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(T), 64));
      ja = reinterpret_cast<int*>(
          _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(int), 64));
      // BUGFIX: sized to match the constructor and save() (was m+1).
      ia = reinterpret_cast<int*>(_mm_malloc((num_rows + 1) * sizeof(int), 64));
      row_ids = reinterpret_cast<int*>(_mm_malloc(num_rows * sizeof(int), 64));
      for (int i = 0; i < nnz; i++) {
        ar & a[i];
      }
      for (int i = 0; i < nnz; i++) {
        ar & ja[i];
      }
      for (int i = 0; i < num_rows + 1; i++) {
        ar & ia[i];
      }
      for (int i = 0; i < num_rows; i++) {
        ar & row_ids[i];
      }
    }
  }
  BOOST_SERIALIZATION_SPLIT_MEMBER()

  // BUGFIX: the default constructors now zero num_rows/num_partitions and
  // null the buffers; previously they were left indeterminate, and save()
  // serialized those indeterminate values for empty tiles.
  DCSRTile()
      : name("TEMP"), m(0), n(0), num_rows(0), nnz(0), num_partitions(0),
        partition_ptrs(NULL), a(NULL), ja(NULL), ia(NULL), row_ids(NULL) {}
  DCSRTile(int _m, int _n)
      : name("TEMP"), m(_m), n(_n), num_rows(0), nnz(0), num_partitions(0),
        partition_ptrs(NULL), a(NULL), ja(NULL), ia(NULL), row_ids(NULL) {}

  // Build a tile from _nnz 1-based edges falling into this tile; the edge
  // array is sorted in place by (src, dst) before compression.
  DCSRTile(edge_t<T>* edges, int _m, int _n, int _nnz, int row_start,
           int col_start)
      : name("TEMP"), m(_m), n(_n), num_rows(0), nnz(_nnz), num_partitions(0),
        partition_ptrs(NULL), a(NULL), ja(NULL), ia(NULL), row_ids(NULL) {
    if (nnz > 0) {
      __gnu_parallel::sort(edges, edges + nnz, [](const edge_t<T>& a, const edge_t<T>& b)
      {
        if (a.src < b.src) return true; else if (a.src > b.src) return false;
        if (a.dst < b.dst) return true; else if (a.dst > b.dst) return false;
        return false;
      });
      // tmp_buf[i] = compressed row index of edge i (prefix count of row
      // transitions in the sorted edge list).
      int * tmp_buf = new int[nnz];
      tmp_buf[0] = 0;
      for (int i = 0; i < nnz - 1; i++) {
        if (edges[i+1].src > edges[i].src) {
          tmp_buf[i+1] = tmp_buf[i] + 1;
        } else {
          tmp_buf[i+1] = tmp_buf[i];
        }
      }
      num_rows = tmp_buf[nnz-1] + 1;
      row_ids = reinterpret_cast<int*>(_mm_malloc(((num_rows)) * sizeof(int), 64));
      ia = reinterpret_cast<int*>(_mm_malloc(((num_rows) + 1) * sizeof(int), 64));
      // Edge coordinates are 1-based and global; store 0-based tile-local rows.
      row_ids[0] = (edges[0].src - row_start) - 1;
      ia[0] = 0;
      for (int i = 0; i < nnz - 1; i++) {
        if (edges[i+1].src > edges[i].src) {
          row_ids[tmp_buf[i+1]] = (edges[i+1].src - row_start) - 1;
          ia[tmp_buf[i+1]] = i + 1;
        }
      }
      ia[num_rows] = nnz;
      delete [] tmp_buf;
      // Oversubscribe partitions 4x vs. threads for load balance.
      num_partitions = omp_get_max_threads() * 4;
      partition_ptrs = new int[num_partitions + 1];
      int rows_per_partition = ((num_rows + num_partitions) - 1) / num_partitions;
      partition_ptrs[0] = 0;
      for (int p = 1; p < num_partitions; p++) {
        int new_row = partition_ptrs[p-1] + rows_per_partition;
        if (new_row > num_rows) {
          new_row = num_rows;
        }
        // Increase new row to next 32-bit boundary.
        // BUGFIX: guard against reading row_ids[num_rows] (one past the end)
        // when the partition boundary already sits at the last row.
        if (new_row < num_rows) {
          int row32 = row_ids[new_row] / 32;
          while ((new_row < num_rows) && ((row_ids[new_row] / 32) == row32)) {
            new_row++;
          }
        }
        partition_ptrs[p] = new_row;
      }
      partition_ptrs[num_partitions] = num_rows;
      ja = reinterpret_cast<int*>(_mm_malloc((nnz) * sizeof(int), 64));
      a = reinterpret_cast<T*>(_mm_malloc((nnz) * sizeof(T), 64));
      for (int i = 0; i < num_rows; i++) {
        for (int j = ia[i]; j < ia[i+1]; j++) {
          ja[j] = (edges[j].dst - col_start) - 1;
          a[j] = edges[j].val;
        }
      }
#ifdef __DEBUG
      // Sanity check: walking partitions in order reproduces the sorted edges.
      unsigned long int nzcnt = 0;
      for (int p = 0; p < num_partitions; p++) {
        for (int _row = partition_ptrs[p]; _row < partition_ptrs[p+1]; _row++) {
          int row = row_ids[_row];
          for (int j = ia[_row]; j < ia[_row+1]; j++) {
            assert(edges[nzcnt].src == (row + row_start + 1));
            assert(edges[nzcnt].dst == (ja[j] + col_start + 1));
            assert(edges[nzcnt].val == (a[j]));
            nzcnt++;
          }
        }
      }
      assert(nzcnt == nnz);
#endif
    }
  }

  bool isEmpty() const { return nnz <= 0; }

  // Decompress the tile back into (1-based, global) edges.  The output array
  // must have room for nnz entries; nnzcnt only validates the count.
  void get_edges(edge_t<T>* edges, int row_start, int col_start) {
    unsigned int nnzcnt = 0;
    if (this->nnz > 0) {
      #pragma omp parallel for reduction(+:nnzcnt)
      for (int i = 0; i < this->num_rows; i++) {
        for (int nz_id = ia[i]; nz_id < ia[i + 1]; nz_id++) {
          edges[nz_id].src = row_ids[i] + row_start + 1;
          edges[nz_id].dst = ja[nz_id] + col_start + 1;
          edges[nz_id].val = a[nz_id];
          nnzcnt++;
        }
      }
      assert(nnzcnt == this->nnz);
    }
  }

  // Shallow assignment: copies the pointers, does NOT duplicate or free the
  // buffers.  Caller is responsible for avoiding a double free.
  DCSRTile& operator=(DCSRTile other) {
    this->name = other.name;
    this->m = other.m;
    this->n = other.n;
    this->num_rows = other.num_rows;
    this->nnz = other.nnz;
    this->a = other.a;
    this->ia = other.ia;
    this->row_ids = other.row_ids;
    this->ja = other.ja;
    this->num_partitions = other.num_partitions;
    this->partition_ptrs = other.partition_ptrs;
    // BUGFIX: flowing off the end of a value-returning function is undefined
    // behavior; the reference result was never returned.
    return *this;
  }

  // Marks the tile empty without freeing (ownership may have been
  // transferred via operator=); the destructor then skips deallocation.
  void clear() {
    nnz = 0;
  }

  ~DCSRTile(void) {
    if (!isEmpty()) {
      _mm_free(a);
      _mm_free(ja);
      _mm_free(ia);
      _mm_free(row_ids);
      delete [] partition_ptrs;
    }
  }
};
#endif // SRC_DCSRTILE_H_
|
no_thread_num_clause.c | // RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s
// REQUIRES: ompt
#include "callback.h"
int main()
{
  /* Fork a team of 4 threads without an explicit num_threads clause; each
     thread prints its implicit-task / parallel ids at nesting levels 0 and 1.
     The CHECK / THREADS lines below are FileCheck directives that verify the
     OMPT callback trace emitted by callback.h — they are part of the test
     and must not be edited. */
  omp_set_num_threads(4);
  #pragma omp parallel
  {
    print_ids(0);
    print_ids(1);
  }
  print_fuzzy_address(1);
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // make sure initial data pointers are null
  // CHECK-NOT: 0: parallel_data initially not null
  // CHECK-NOT: 0: task_data initially not null
  // CHECK-NOT: 0: thread_data initially not null
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end!
  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // THREADS: 0: NULL_POINTER=[[NULL:.*$]]
  // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_initial=1, thread_id=[[MASTER_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=0, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id=281474976710658, codeptr_ra=[[NULL]], task_type=ompt_task_initial=1, has_dependences=no
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker={{[0-9]+}}
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  return 0;
}
|
alignment.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <libgen.h>
int readseqs(int first_seq, char *filename);
int ktup, window, signif;
int prot_ktup, prot_window, prot_signif;
int gap_pos1, gap_pos2, mat_avscore;
int nseqs, max_aa;
#define MAX_ALN_LENGTH 5000
#define NUMRES 256
int *seqlen_array, def_aa_xref[NUMRES+1];
int *bench_output, *seq_output;
double gap_open, gap_extend;
double prot_gap_open, prot_gap_extend;
double pw_go_penalty, pw_ge_penalty;
double prot_pw_go_penalty, prot_pw_ge_penalty;
char **args, **names, **seq_array;
int matrix[NUMRES][NUMRES];
#define MIN(a,b) ((a)<(b)?(a):(b))
#define tbgap(k) ((k) <= 0 ? 0 : tb + gh * (k))
#define tegap(k) ((k) <= 0 ? 0 : te + gh * (k))
/*
 * pairalign: compute pairwise alignment scores for every sequence pair
 * (si, sj) with si < sj, writing the similarity score into
 * bench_output[si*nseqs+sj] (note: bench_output is an int array, so the
 * double score is truncated on store — this matches the benchmark's
 * reference behaviour).
 *
 * Parallel structure: the outer loop over si is an OpenMP worksharing
 * loop; each inner (si, sj) pair is an untied task that runs the full
 * forward/reverse dynamic-programming passes.
 *
 * istart/iend/jstart/jend are part of the benchmark's common interface
 * and are intentionally unused in this variant.
 *
 * Returns 0 on success, -1 when the substitution matrix cannot be loaded.
 *
 * Fix vs. original: removed a leftover debug printf that dumped the
 * address and value of len1 for every outer iteration inside the
 * parallel region.
 */
int pairalign(int istart, int iend, int jstart, int jend)
{
   int i, n, m, si, sj;
   int len1, len2, maxres;
   double gg, mm_score;
   int *mat_xref, *matptr;

   matptr = 0;
   mat_xref = def_aa_xref;
   maxres = get_matrix(matptr, mat_xref, 10);
   if (maxres == 0) return(-1);

   bots_message("Start aligning ");
#pragma omp parallel
{
#pragma omp for schedule(dynamic) private(i,n,si,sj,len1,m)
   for (si = 0; si < nseqs; si++) {
      if ((n = seqlen_array[si+1]) != 0){
         /* len1 = number of non-gap residues in sequence si */
         for (i = 1, len1 = 0; i <= n; i++) {
            char c = seq_array[si+1][i];
            if ((c != gap_pos1) && (c != gap_pos2)) len1++;
         }
         for (sj = si + 1; sj < nseqs; sj++)
         {
            if ((m = seqlen_array[sj+1]) != 0)
            {
#pragma omp task untied \
private(i,gg,len2,mm_score) firstprivate(m,n,si,sj,len1) \
shared(nseqs, bench_output,seqlen_array,seq_array,gap_pos1,gap_pos2,pw_ge_penalty,pw_go_penalty,mat_avscore)
               {
                  int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh;
                  int displ[2*MAX_ALN_LENGTH+1];
                  int print_ptr, last_print;

                  /* len2 = number of non-gap residues in sequence sj */
                  for (i = 1, len2 = 0; i <= m; i++) {
                     char c = seq_array[sj+1][i];
                     if ((c != gap_pos1) && (c != gap_pos2)) len2++;
                  }
                  /* gap penalties, scaled by matrix statistics and
                   * the length of the shorter sequence */
                  gh = 10 * pw_ge_penalty;
                  gg = pw_go_penalty + log((double) MIN(n, m));
                  g  = (mat_avscore <= 0) ? 20 * gg : 2 * mat_avscore * gg;

                  seq1 = si + 1;
                  seq2 = sj + 1;
                  /* Myers-Miller style alignment: locate the optimal
                   * end point, then the start point, then recover the
                   * alignment path and its score. */
                  forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh);
                  reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh);
                  print_ptr = 1;
                  last_print = 0;
                  diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh);
                  mm_score = tracepath(sb1, sb2, &print_ptr, &last_print, displ, seq1, seq2);

                  /* normalize by the shorter ungapped length */
                  if (len1 == 0 || len2 == 0) mm_score = 0.0;
                  else mm_score /= (double) MIN(len1,len2);

                  bench_output[si*nseqs+sj] = mm_score;
               }
            }
         }
      }
   }
}
   bots_message(" completed!\n");
   return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` operands.
 *
 * Follows the classic GNU libc manual recipe: *y is first normalized
 * (its fields are MODIFIED and used as scratch space) so that the
 * microsecond difference lands in a representable range, then the
 * subtraction is done field by field.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
   /* Borrow whole seconds into y when x has fewer microseconds. */
   if (x->tv_usec < y->tv_usec)
   {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_sec  += borrow;
      y->tv_usec -= 1000000 * borrow;
   }
   /* Carry excess microseconds into seconds when the gap exceeds one second. */
   if (x->tv_usec - y->tv_usec > 1000000)
   {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_sec  -= carry;
      y->tv_usec += 1000000 * carry;
   }
   /* After normalization the microsecond difference is non-negative. */
   result->tv_sec  = x->tv_sec  - y->tv_sec;
   result->tv_usec = x->tv_usec - y->tv_usec;
   return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: runs an order-1 3D 7-point variable-coefficient
 * stencil TESTS times and reports the minimum wall-clock time.
 *
 * argv[1..3]: interior grid size in x/y/z (a 2-wide halo is added);
 * argv[4]: number of time steps. Defaults are used when arguments are
 * absent (the original left the sizes uninitialized -> UB).
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults (including halo) so a missing argv does not cause UB. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the two time planes of A */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* allocate the 7 per-point stencil coefficient arrays */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 256;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /* Initialize ALL grid points, including the index-0 planes read by the
   * stencil, and BOTH time planes of A (the original started the loops at
   * 1 and never wrote A[1], so boundary reads hit uninitialized memory). */
  srand(42);
  for (m = 0; m < 2; m++)
    for (i = 0; i < Nz; i++)
      for (j = 0; j < Ny; j++)
        for (k = 0; k < Nx; k++)
          A[m][i][j][k] = 1.0 * (rand() % BASE);
  for (m = 0; m < 7; m++)
    for (i = 0; i < Nz; i++)
      for (j = 0; j < Ny; j++)
        for (k = 0; k < Nx; k++)
          coef[m][i][j][k] = 1.0 * (rand() % BASE);

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* was lowercase `min(...)` — an undefined identifier; the macro is MIN */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return;   /* kept for parity with the MPI variants */
  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays — including the top-level pointers and the
   * tile-size list, which the original leaked. */
  for(i=0; i<Nz; i++){
    for(j=0; j<Ny; j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7; m++){
    for(i=0; i<Nz; i++){
      for(j=0; j<Ny; j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
edgebased_levelset.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Antonia Larese
//
#if !defined(KRATOS_EDGEBASED_LEVELSET_FLUID_SOLVER_H_INCLUDED)
#define KRATOS_EDGEBASED_LEVELSET_FLUID_SOLVER_H_INCLUDED
//#define SPLIT_OSS
// #define SYMM_PRESS
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// #include <omp.h>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/global_pointer_variables.h"
#include "includes/node.h"
#include "includes/cfd_variables.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "free_surface_application.h"
namespace Kratos
{
template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver>
class EdgeBasedLevelSet
{
public:
//name for the self defined structure
typedef EdgesStructureType<TDim> CSR_Tuple;
typedef vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef vector<unsigned int> IndicesVectorType;
//defining matrix type for test calculations
typedef vector< array_1d<double, TDim> > CalcVectorType;
//defining type for local storage of nodal values
typedef vector<double> ValuesVectorType;
//defining types for matrix operations
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
typedef std::size_t SizeType;
//constructor and destructor
/**
 * Constructor.
 *
 * Stores references to the edge-based matrix container and the model part,
 * copies the stabilization/solver parameters into members, and seeds the
 * per-node VISCOSITY database value with the molecular viscosity.
 *
 * @param mr_matrix_container      edge-based CSR data container (reference kept)
 * @param mr_model_part            model part holding nodes/conditions (reference kept)
 * @param viscosity                molecular (kinematic) viscosity written to every node
 * @param density                  fluid density (stored in mRho)
 * @param body_force               body-force vector; first TDim entries copied
 * @param use_mass_correction      enables the mass-correction term
 * @param edge_detection_angle     threshold angle for edge detection
 * @param stabdt_pressure_factor   factor on 1/dt in the pressure tau
 * @param stabdt_convection_factor factor on 1/dt in the convection tau
 * @param tau2_factor              scaling of the second stabilization parameter
 * @param assume_constant_dp       flag: treat the pressure increment as constant
 */
EdgeBasedLevelSet(MatrixContainer& mr_matrix_container,
                  ModelPart& mr_model_part,
                  const double viscosity,
                  const double density,
                  const Vector body_force,
                  bool use_mass_correction,
                  double edge_detection_angle,
                  double stabdt_pressure_factor,
                  double stabdt_convection_factor,
                  double tau2_factor,
                  bool assume_constant_dp
                 )
    : mr_matrix_container(mr_matrix_container),
      mr_model_part(mr_model_part),
      mstabdt_pressure_factor(stabdt_pressure_factor),
      mstabdt_convection_factor(stabdt_convection_factor),
      medge_detection_angle(edge_detection_angle),
      mtau2_factor(tau2_factor),
      massume_constant_dp(assume_constant_dp)
{
    // Write the molecular viscosity into every node's solution-step data.
    for (ModelPart::NodesContainerType::iterator it=mr_model_part.NodesBegin(); it!=mr_model_part.NodesEnd(); it++)
        it->FastGetSolutionStepValue (VISCOSITY) = viscosity;
    mMolecularViscosity = viscosity;
    for(unsigned int i = 0; i<TDim; i++)
        mBodyForce[i] = body_force[i];
    mRho = density;
    mdelta_t_avg = 1000.0;   // large sentinel; recomputed in ComputeTimeStep
    max_dt = 1.0;
    muse_mass_correction = use_mass_correction;
    mshock_coeff = 0.7;      // default; may be overridden via SetShockCapturingCoefficient
    mWallLawIsActive = false;
};
// Trivial destructor: all members manage their own storage.
// (Fix: removed the redundant semicolon after the function body.)
~EdgeBasedLevelSet()
{
}
//***********************************
//function to initialize fluid solver
/**
 * Initialize the fluid solver.
 *
 * Sizes and zeroes every nodal/edge work vector, pulls the initial
 * velocity/pressure/coordinate state from the Kratos database, classifies
 * boundary nodes (fixed-velocity and pressure-outlet lists), builds the
 * sparsity pattern of the pressure matrix mL, computes edge lengths, and
 * seeds the pressure projection with rho * body force.
 *
 * Must be called once before any solve step.
 */
void Initialize(
)
{
    KRATOS_TRY
    //get number of nodes
    unsigned int n_nodes = mr_model_part.Nodes().size();
    unsigned int n_edges = mr_matrix_container.GetNumberEdges();
    // --- size data vectors (all zero-initialized) ---
    mViscosity.resize (n_nodes);
    mr_matrix_container.SetToZero (mViscosity);
    mWork.resize(n_nodes);
    mr_matrix_container.SetToZero(mWork);
    mvel_n.resize(n_nodes);
    mr_matrix_container.SetToZero(mvel_n);
    mvel_n1.resize(n_nodes);
    mr_matrix_container.SetToZero(mvel_n1);
    mPn.resize(n_nodes);
    mr_matrix_container.SetToZero(mPn);
    mPn1.resize(n_nodes);
    mr_matrix_container.SetToZero(mPn1);
    mHmin.resize(n_nodes);
    mr_matrix_container.SetToZero(mHmin);
    mHavg.resize(n_nodes);
    mr_matrix_container.SetToZero(mHavg);
    mNodalFlag.resize(n_nodes);
    mr_matrix_container.SetToZero(mNodalFlag);
    mdistances.resize(n_nodes);
    mr_matrix_container.SetToZero(mdistances);
    mTauPressure.resize(n_nodes);
    mr_matrix_container.SetToZero(mTauPressure);
    mTauConvection.resize(n_nodes);
    mr_matrix_container.SetToZero(mTauConvection);
    mTau2.resize(n_nodes);
    mr_matrix_container.SetToZero(mTau2);
    mPi.resize(n_nodes);
    mr_matrix_container.SetToZero(mPi);
    mXi.resize(n_nodes);
    mr_matrix_container.SetToZero(mXi);
    mx.resize(n_nodes);
    mr_matrix_container.SetToZero(mx);
    mEdgeDimensions.resize(n_edges);
    mr_matrix_container.SetToZero(mEdgeDimensions);
    //convection variables
    mBeta.resize(n_nodes);
    mr_matrix_container.SetToZero(mBeta);
    mPiConvection.resize(n_nodes);
    mr_matrix_container.SetToZero(mPiConvection);
    mphi_n.resize(n_nodes);
    mr_matrix_container.SetToZero(mphi_n);
    mphi_n1.resize(n_nodes);
    mr_matrix_container.SetToZero(mphi_n1);
    mEps.resize(n_nodes);
    mr_matrix_container.SetToZero(mEps);
    mA.resize(n_nodes);
    mr_matrix_container.SetToZero(mA);
    mB.resize(n_nodes);
    mr_matrix_container.SetToZero(mB);
    mStrVel.resize(n_nodes);
    mr_matrix_container.SetToZero(mStrVel);
    mdiv_error.resize(n_nodes);
    mr_matrix_container.SetToZero(mdiv_error);
    mdiag_stiffness.resize (n_nodes);
    mr_matrix_container.SetToZero (mdiag_stiffness);
    mis_slip.resize (n_nodes);   // NOTE(review): intentionally not zeroed here — presumably filled by the slip-detection routines; confirm
    //read velocity and pressure data from Kratos (current and previous step)
    mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() );
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes());
    mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes());
    mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, mr_model_part.Nodes());
    mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes());
    //set flag for first time step
    mFirstStep = true;
    // --- classify boundary nodes into temporary lists ---
    std::vector< unsigned int> tempFixedVelocities;
    std::vector< array_1d<double,TDim> > tempFixedVelocitiesValues;
    std::vector< unsigned int> tempPressureOutletList;
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
    {
        int index = inode->FastGetSolutionStepValue(AUX_INDEX);
        if (inode->IsFixed(VELOCITY_X)) //note that the velocity components can be either all fixed or none fixed
        {
            if (inode->IsFixed(VELOCITY_Y) == false || inode->IsFixed(VELOCITY_Z) == false)
            {
                std::cout << "error found on the fixity of node " << inode->Id() << std::endl;
                KRATOS_THROW_ERROR(std::logic_error, "velocities can be either all fixed or none fixed", "")
            }
            tempFixedVelocities.push_back(index);
            tempFixedVelocitiesValues.push_back(mvel_n1[index]);
        }
        if (inode->IsFixed(PRESSURE))
        {
            tempPressureOutletList.push_back(index);
        }
    }
    // copy the temporaries into the member vectors (parallel copy)
    mFixedVelocities.resize(tempFixedVelocities.size(),false);
    mFixedVelocitiesValues.resize(tempFixedVelocitiesValues.size(),false);
    mPressureOutletList.resize(tempPressureOutletList.size(),false);
    #pragma omp parallel for
    for(int i=0; i< static_cast<int>(tempFixedVelocities.size()); i++)
    {
        mFixedVelocities[i] = tempFixedVelocities[i];
        mFixedVelocitiesValues[i] = tempFixedVelocitiesValues[i];
    }
    #pragma omp parallel for
    for(int i=0; i< static_cast<int>(tempPressureOutletList.size()); i++)
    {
        mPressureOutletList[i] = tempPressureOutletList[i];
    }
    //compute slip normals and fill SlipList
    CalculateNormals(mr_model_part.Conditions());
    mr_matrix_container.WriteVectorToDatabase(NORMAL, mSlipNormal, mr_model_part.Nodes());
    if(TDim == 3)
        DetectEdges3D(mr_model_part.Conditions());
    // --- build the sparsity pattern of the pressure matrix mL ---
    mL.resize(n_nodes, n_nodes, false);
    int number_of_threads= OpenMPUtils::GetNumThreads();
    std::vector<int> row_partition(number_of_threads);
    OpenMPUtils::DivideInPartitions(n_nodes,number_of_threads,row_partition);
    // mL.push_back must insert entries in ascending row order, so each
    // thread processes its contiguous row range while the others idle:
    // the outer k-loop serializes the partitions.
    for (int k = 0; k < number_of_threads; k++)
    {
        #pragma omp parallel
        if (OpenMPUtils::ThisThread() == k)
        {
            for (int i_node = static_cast<int> (row_partition[k]); i_node < static_cast<int> (row_partition[k + 1]); i_node++)
            {
                //flag for considering diagonal matrix elements
                bool flag = 0;
                //loop over all neighbours of i_node
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                {
                    //get global index of neighbouring node j
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    //define matrix structure row by row (the order does matter!)
                    if ((static_cast<int>(j_neighbour) > i_node) && (flag == 0))
                    {
                        //add diagonal/nodal contribution before the first super-diagonal entry
                        mL.push_back(i_node, i_node, 0.0);
                        flag = 1;
                    }
                    //add non-diagonal/edge contribution
                    mL.push_back(i_node, j_neighbour, 0.0);
                }
                //if diagonal element is the last non-zero element of the row
                if (flag == 0)
                    mL.push_back(i_node, i_node, 0.0);
            }
        }
    }
    //compute minimum length of the surrounding edges
    CalculateEdgeLengths(mr_model_part.Nodes());
    //set the pressure projection to the body force value (rho * g)
    array_1d<double,3> temp = ZeroVector(3);
    for(unsigned int i = 0 ; i < TDim; i++)
        temp[i]= mRho * mBodyForce[i];
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
    {
        array_1d<double, 3> & press_proj = inode->FastGetSolutionStepValue(PRESS_PROJ);
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            press_proj[l_comp] = temp[l_comp];
    }
    KRATOS_CATCH("")
}
// Override the shock-capturing coefficient (constructor default: 0.7).
void SetShockCapturingCoefficient(double coeff)
{
    mshock_coeff = coeff;
}
//***************************************
//function to set adequate time step size
//***************************************
//function to set adequate time step size
/**
 * Compute a stable time step from the CFL condition.
 *
 * Refreshes viscosity, velocity, porosity, Darcy coefficients and
 * structure velocity from the database, then for every node evaluates
 *   dt_i = CFL / ( 2*|v|/h + 4*nu/h^2 )
 * using the minimum edge length hmin; the most restrictive neighbour
 * velocity difference is also considered. Side effect: mdelta_t_avg is
 * updated with the analogous estimate based on the average length havg.
 *
 * @param CFLNumber target CFL number
 * @param MaxDt     upper bound stored into max_dt (not applied to the return value here)
 * @return the minimum admissible time step over all nodes
 */
double ComputeTimeStep(const double CFLNumber, const double MaxDt)
{
    KRATOS_TRY
    //save the maximum time step
    max_dt = MaxDt;
    //local variable for time step size (large sentinels, shrunk below)
    double delta_t = 1e10;
    mdelta_t_avg = 1e10;
    //getting value of current velocity and of viscosity
    mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() );
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes());
    mr_matrix_container.FillVectorFromDatabase(STRUCTURE_VELOCITY, mStrVel, mr_model_part.Nodes());
    //loop over all nodes
    unsigned int n_nodes = mvel_n1.size();
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        const array_1d<double, TDim>& v_i = mvel_n1[i_node];
        const double havg_i = mHavg[i_node];
        const double hmin_i = mHmin[i_node];
        const double eps_i = mEps[i_node];
        const double nu = mViscosity[i_node];
        // |v_i|
        double vel_norm = 0.0;
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        {
            vel_norm += v_i[l_comp]*v_i[l_comp];
        }
        vel_norm = sqrt(vel_norm);
        // convert Darcy velocity to fluid velocity via the porosity
        vel_norm /= eps_i;
        //use CFL condition to compute time step size
        double delta_t_i = CFLNumber * 1.0 / (2.0 * vel_norm /hmin_i + 4.0 * nu / (hmin_i * hmin_i));
        double delta_t_i_avg = 1.0 / (2.0 * vel_norm /havg_i + 4.0 * nu / (havg_i * havg_i));
        //considering the most restrictive case of neighbor's velocities with similar direction but opposite sense.
        //loop over all neighbours
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            //get global index of neighbouring node j
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            const array_1d<double, TDim>& v_j = mvel_n1[j_neighbour];
            // |v_i - v_j| scaled by porosity
            double v_diff_norm = 0.0;
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            {
                double temp = v_i[l_comp] - v_j[l_comp];
                v_diff_norm += temp*temp;
            }
            v_diff_norm = sqrt(v_diff_norm);
            v_diff_norm /= eps_i;
            double delta_t_j = CFLNumber * 1.0 / (2.0 * v_diff_norm /hmin_i + 4.0 * nu / (hmin_i * hmin_i));
            if (delta_t_j < delta_t_i)
                delta_t_i = delta_t_j;
        }
        //choose the overall minimum of delta_t_i
        if (delta_t_i < delta_t)
            delta_t = delta_t_i;
        if(delta_t_i_avg < mdelta_t_avg)
            mdelta_t_avg = delta_t_i_avg;
    }
    //perform MPI syncronization of the dt (minimum should be kept)
    return delta_t;
    KRATOS_CATCH("")
}
// Apply the Smagorinsky turbulence correction to the nodal viscosity.
// A zero Smagorinsky constant disables the model entirely (no-op).
void ApplySmagorinsky (double MolecularViscosity, double Cs)
{
    if (Cs == 0)
        return;
    // Dispatch to the dimension-specific implementation.
    if (TDim == 3)
        ApplySmagorinsky3D (MolecularViscosity, Cs);
    else
        ApplySmagorinsky2D (MolecularViscosity, Cs);
}
void UpdateFixedVelocityValues()
{
KRATOS_TRY
//read velocity and pressure data from Kratos
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
int fixed_size = mFixedVelocities.size();
#pragma omp parallel for firstprivate(fixed_size)
for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
{
unsigned int i_node = mFixedVelocities[i_velocity];
array_1d<double, TDim>& u_i_fix = mFixedVelocitiesValues[i_velocity];
const array_1d<double, TDim>& u_i = mvel_n1[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
u_i_fix[comp] = u_i[comp];
}
KRATOS_CATCH("");
}
//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum
//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum
/**
 * Fractional step 1: advance the momentum equation to obtain the
 * fractional (intermediate) velocity mvel_n1.
 *
 * Stages:
 *  1. refresh nodal data (velocity, pressure, viscosity, porosity,
 *     Darcy coefficients, structure velocity) from the database;
 *  2. compute per-node stabilization parameters tau (pressure),
 *     tau_conv (convection) and mTau2;
 *  3. compute the convective projection mPi;
 *  4. integrate the momentum RHS with a 4-stage Runge-Kutta scheme
 *     (weights dt/6, dt/3, dt/3, dt/6 accumulated into mWork),
 *     applying the velocity BCs after every stage.
 *
 * The statement order is critical: each RK stage reuses rhs and
 * mdiag_stiffness and reads the mvel_n1 produced by the previous stage.
 */
void SolveStep1()
{
    KRATOS_TRY
    //PREREQUISITES
    //variables for node based data handling
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();
    //storage of nodal values in local variables
    CalcVectorType rhs;
    rhs.resize(n_nodes);
    //read velocity and pressure data from Kratos
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
    mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
    mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, rNodes);
    mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes);
    mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes);
    mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes());
    mr_matrix_container.FillVectorFromDatabase(STRUCTURE_VELOCITY, mStrVel, rNodes);
    //read time step size from Kratos
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    //compute intrinsic time (copies into locals for OpenMP firstprivate)
    double time_inv_avg = 1.0/mdelta_t_avg;
    double stabdt_pressure_factor = mstabdt_pressure_factor;
    double stabdt_convection_factor = mstabdt_convection_factor;
    double tau2_factor = mtau2_factor;
    // --- stage 2: stabilization parameters per node ---
    #pragma omp parallel for firstprivate(time_inv_avg,stabdt_pressure_factor,stabdt_convection_factor,tau2_factor)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double& h_avg_i = mHavg[i_node];
        array_1d<double, TDim>& a_i = mvel_n1[i_node];
        const double nu_i = mViscosity[i_node];
        const double eps_i = mEps[i_node];
        const double lindarcy_i = mA[i_node];
        const double nonlindarcy_i = mB[i_node];
        // |v_i|
        double vel_norm = 0.0;
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        {
            vel_norm += a_i[l_comp]*a_i[l_comp];
        }
        vel_norm = sqrt(vel_norm);
        // |v_i - v_structure| drives the porous (Darcy) drag coefficient
        const array_1d<double, TDim>& str_v_i = mStrVel[i_node];
        array_1d<double, TDim> rel_vel_i;
        double rel_vel_norm = 0.0;
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        {
            rel_vel_i[l_comp] = a_i[l_comp] - str_v_i[l_comp];
            rel_vel_norm += rel_vel_i[l_comp]*rel_vel_i[l_comp];
        }
        rel_vel_norm = sqrt(rel_vel_norm);
        double porosity_coefficient = ComputePorosityCoefficient(rel_vel_norm, eps_i, lindarcy_i, nonlindarcy_i);
        vel_norm /= eps_i;
        // intrinsic times: tau for pressure, tau_conv for convection
        double tau = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_pressure_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
        double tau_conv = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_convection_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
        mTauPressure[i_node] = tau;
        mTauConvection[i_node] = tau_conv;
        mTau2[i_node] = (nu_i + h_avg_i*vel_norm*0.5)*tau2_factor;
    }
    // --- stage 3: convective projection pi_i ---
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& pi_i = mPi[i_node];
        //setting to zero
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            pi_i[l_comp] = 0.0;
        array_1d<double, TDim> a_i = mvel_n1[i_node];
        const array_1d<double, TDim>& U_i = mvel_n1[i_node];
        const double& eps_i = mEps[i_node];
        /*convective velocity == fluid velocity (not darcy velocity)*/
        a_i /= eps_i;
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
            const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
            const double& eps_j = mEps[j_neighbour];
            /*convective velocity == fluid velocity (not darcy velocity)*/
            a_j /= eps_j;
            CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
            edge_ij.Add_ConvectiveContribution(pi_i, a_i, U_i, a_j, U_j);
        }
        // scale by the inverted lumped mass to obtain the nodal projection
        const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            pi_i[l_comp] *= m_inv;
    }
    // --- stage 4: 4-stage Runge-Kutta integration ---
    mr_matrix_container.AssignVectorToVector (mvel_n, mWork); //mWork = mvel_n (RK accumulator)
    //first step of Runge Kutta (weight dt/6)
    mr_matrix_container.AssignVectorToVector (mvel_n, mvel_n1); //mvel_n1 = mvel_n
    mr_matrix_container.SetToZero (rhs);
    CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
    Add_Effective_Inverse_Multiply (mWork, mWork, delta_t / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness,rhs);
    Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
    ApplyVelocityBC (mvel_n1);
    //second step (weight dt/3)
    mr_matrix_container.SetToZero (rhs);
    CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
    Add_Effective_Inverse_Multiply (mWork, mWork, delta_t / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
    Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
    ApplyVelocityBC (mvel_n1);
    //third step (weight dt/3)
    mr_matrix_container.SetToZero (rhs);
    CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
    Add_Effective_Inverse_Multiply (mWork, mWork, delta_t / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
    Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, delta_t, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
    ApplyVelocityBC (mvel_n1);
    //fourth step (weight dt/6)
    mr_matrix_container.SetToZero (rhs);
    CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
    Add_Effective_Inverse_Multiply (mWork, mWork, delta_t / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
    //the accumulated mWork is the fractional velocity
    mr_matrix_container.AssignVectorToVector (mWork, mvel_n1);
    ApplyVelocityBC (mvel_n1);
    KRATOS_CATCH("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
// Assembles the right-hand side of the fractional momentum equation for every
// fluid node (signed distance <= 0) and fills the diagonal Darcy resistance.
// Per-node contributions: body force scaled by lumped mass and porosity,
// porous (Darcy) resistance split between rhs and diag_stiffness, and
// edge-wise convective, pressure-gradient, viscous and OSS-type stabilization
// terms. Nodes with positive distance (outside the fluid) are left untouched.
//
//   vel                 : nodal velocities the residual is evaluated at (read)
//   pressure            : nodal pressures (read)
//   convective_velocity : velocity used for the convection operator (read)
//   rhs                 : output - assembled momentum residual per node
//   diag_stiffness      : output - lumped porosity/Darcy stiffness per node
void CalculateRHS(
const CalcVectorType& vel,
const ValuesVectorType& pressure,
const CalcVectorType& convective_velocity,
CalcVectorType& rhs,
ValuesVectorType& diag_stiffness)
{
KRATOS_TRY
int n_nodes = vel.size();
//perform MPI syncronization
//calculating the RHS
// per-thread scratch vectors for the stabilization terms, hence private below
array_1d<double, TDim> stab_low;
array_1d<double, TDim> stab_high;
double inverse_rho = 1.0 / mRho;
#pragma omp parallel for private(stab_low,stab_high)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist <= 0.0) //node is inside domain ---- if outside do nothing
{
const double nu_i = mViscosity[i_node];
const double nu_j = nu_i; // same viscosity assumed on both ends of each edge
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = mBodyForce;
array_1d<double, TDim> a_i = convective_velocity[i_node];
// const double& beta_i = mBeta[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = pressure[i_node];
const double& eps_i = mEps[i_node];
// //const double& d_i = mD[i_node];
const double lindarcy_i = mA[i_node];
const double nonlindarcy_i = mB[i_node];
const array_1d<double, TDim>& str_v_i = mStrVel[i_node];
// relative velocity (fluid - structure) drives the Darcy resistance
array_1d<double, TDim> rel_vel_i;
double rel_vel_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
rel_vel_i[l_comp] = U_i[l_comp] - str_v_i[l_comp];
rel_vel_norm += rel_vel_i[l_comp]*rel_vel_i[l_comp];
}
rel_vel_norm = sqrt(rel_vel_norm);
//const double& tau2_i = mTau2[i_node];
double edge_tau = mTauConvection[i_node];
/*convective velocity == fluid velocity (not darcy velocity)*/
a_i /= eps_i;
/*convective front velocity == fluid velocity - structural velocity*/
// // ****************************************rel_vel_modifications_b
// for(unsigned int comp = 0; comp < TDim; comp++)
// {a_i[comp] -= str_v_i[comp];}
// // ****************************************rel_vel_modifications_e
//
//double& h_i = mHmin[i_node];
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * eps_i * f_i[comp] ;
//applying the effect of the porosity
// double porosity_coefficient = ComputePorosityCoefficient(mViscosity,norm_2(U_i),eps_i, d_i);
// double porosity_coefficient = ComputePorosityCoefficient( norm_2(U_i), eps_i, lindarcy_i, nonlindarcy_i);
double porosity_coefficient = ComputePorosityCoefficient( rel_vel_norm, eps_i, lindarcy_i, nonlindarcy_i);
// the U_i part of the Darcy term goes implicitly through diag_stiffness;
// only the structural-velocity part is added explicitly to the rhs below
diag_stiffness[i_node]= m_i * porosity_coefficient;
// /**************************************************rel_vel_modifications_b*/
for (unsigned int comp = 0; comp < TDim; comp++)
{
// rhs_i[comp] -= m_i * porosity_coefficient * U_i[comp];
rhs_i[comp] += m_i * porosity_coefficient * str_v_i[comp];
}
// /*************************************************rel_vel_modifications_e*/
//std::cout << i_node << "rhs =" << rhs_i << "after adding body force" << std::endl;
//convective term: loop over all CSR edges of node i
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim> a_j = convective_velocity[j_neighbour];
const array_1d<double, TDim>& U_j = vel[j_neighbour];
const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = pressure[j_neighbour];
const double& eps_j = mEps[j_neighbour];
// const double& beta_j = mBeta[j_neighbour];
/*convective velocity == fluid velocity (not darcy velocity)*/
a_j /= eps_j;
/*convective front velocity == fluid velocity - structural velocity*/
// ****************************************rel_vel_modifications_b
// const array_1d<double, TDim>& str_v_j = mStrVel[j_neighbour];
// for(unsigned int comp = 0; comp < TDim; comp++)
// {a_j[comp] -= str_v_j[comp];}
// ****************************************/*rel_vel_modifications*/_e
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, U_i, a_j, U_j);
// std::cout << i_node << "rhs =" << rhs_i << "after convective contrib" << std::endl;
//take care! we miss including a B.C. for the external pressure
// edge_ij.Add_Gp(rhs_i,p_i*inverse_rho,p_j*inverse_rho);
// NOTE(review): eps_i multiplies both the p_i and p_j terms here (not eps_j) -- confirm intended
edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho*eps_i, p_j * inverse_rho*eps_i);
// edge_ij.Add_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
// std::cout << i_node << "rhs =" << rhs_i << "after Gp" << std::endl;
edge_ij.Sub_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
// std::cout << i_node << "rhs =" << rhs_i << "after viscous" << std::endl;
//add stabilization: low-order minus high-order (projected) convection
edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i, a_j, U_j);
// edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i,p_i, a_j, U_j,p_j);
edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
// double beta = 1.0;
// double beta = beta_i;
// if(beta_j > beta)
// beta = beta_j;
// beta = 1.0;
// edge_ij.Sub_StabContribution(rhs_i, edge_tau*beta, 1.0, stab_low, stab_high);
// edge_ij.Sub_StabContribution(rhs_i, edge_tau, (1.0-beta), stab_low, stab_high);
edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
// std::cout << i_node << "rhs =" << rhs_i << "after stab" << std::endl;
//add tau2 term
// boost::numeric::ublas::bounded_matrix<double,TDim,TDim>& LL = edge_ij.LaplacianIJ;
// for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
// {
// double aaa = 0.0;
// for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
// aaa += LL(k_comp,m_comp) * (U_j[m_comp] - U_i[m_comp]);
// rhs_i[k_comp] -= tau2_i*aaa;
// }
}
// std::cout << i_node << "rhs =" << rhs_i << std::endl;
}
}
//apply wall resistance (modifies diag_stiffness for wall nodes)
if (mWallLawIsActive == true)
ComputeWallResistance (vel,diag_stiffness);
// NOTE(review): this writes the member velocity mvel_n1 (not rhs) back to the
// nodal database at the end of RHS assembly -- confirm this side effect is intended
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
KRATOS_CATCH("")
}
//*************************************************************************
//function to solve fluid equations - fractional step 2: calculate pressure
// Fractional step 2: solves the pressure (Poisson-like) equation.
// Outline:
//   1. rebuild the free-surface layer structure after convection:
//      LAYER 0 = fluid nodes touching the interface, LAYER 1 = their
//      first neighbours outside the fluid,
//   2. extrapolate a pressure estimate onto LAYER 1 so that the pressure
//      is approximately zero at the free surface itself,
//   3. assemble the edge-based Laplacian mL and the rhs, impose outlet and
//      air-node boundary conditions by row substitution,
//   4. symmetrically scale the system, solve for the pressure increment dp
//      with the supplied linear solver and update mPn1,
//   5. recompute the pressure-gradient projection mXi for the next step.
//
//   pLinearSolver : linear solver used for the scaled system mL * dp = rhs
void SolveStep2(typename TLinearSolver::Pointer pLinearSolver)
{
KRATOS_TRY
typedef Node < 3 > PointType;
typedef GlobalPointersVector<PointType > PointVector;
typedef PointVector::iterator PointIterator;
//reset is visited flag
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0.0;
}
//Re-generate a container with LAYER 0 and LAYER 1 after convection of the free surface
std::vector< PointVector > layers(2);
//detect the nodes inside the fluid surface LAYER_0
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if (inode->FastGetSolutionStepValue(DISTANCE) < 0.0) //candidates are only the ones inside the fluid domain
{
GlobalPointersVector< Node < 3 > >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->FastGetSolutionStepValue(DISTANCE) >= 0.0) //add the node as free surface if one of its neighb is outside
{
if (inode->GetValue(IS_VISITED) == 0.0)
{
layers[0].push_back(*(inode.base()));
inode->GetValue(IS_VISITED) = 1.0;
}
}
}
}
else
inode->FastGetSolutionStepValue(PRESSURE) = 0.0; // air node: zero pressure
}
//fill layer 1 by neighbour relationships
for (PointIterator iii = (layers[0]).begin(); iii != (layers[0]).end(); iii++)
{
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator jjj = neighb_nodes.begin(); jjj != neighb_nodes.end(); jjj++) //destination = origin1 + value * Minv*origin
{
if (jjj->FastGetSolutionStepValue(DISTANCE) >= 0 &&
jjj->GetValue(IS_VISITED) == 0.0)
{
layers[1].push_back(Node<3>::WeakPointer(*jjj.base()));
jjj->GetValue(IS_VISITED) = 2.0;
}
}
}
/* for (PointIterator iii = layers[il].begin(); iii != layers[il].end(); iii++) {
// std::cout << iii->Id() << " " << std::endl;
const array_1d<double, 3 > & coords_top = iii->Coordinates();
//extrapolate the average velocity
noalias(aux) = ZeroVector(3);
noalias(aux_proj) = ZeroVector(3);
double avg_number = 0.0;
double pavg = 0.0;
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->GetValue(IS_VISITED) < (il + 1) && i->GetValue(IS_VISITED) != 0.0) {*/
//on the first layer outside the pressure is set to a value such that on the free surface the pressure is approx 0
for (PointIterator iii = layers[1].begin(); iii != layers[1].end(); iii++)
{
//get the node
unsigned int i_node = iii->FastGetSolutionStepValue(AUX_INDEX);
// edge-assembled gradient of the distance field at this node
array_1d<double, TDim> grad_d;
for (unsigned int comp = 0; comp < TDim; comp++)
grad_d[comp] = 0.0;
double dist_i = mdistances[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& dist_j = mdistances[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(grad_d, dist_i, dist_j);
}
const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
grad_d[l_comp] *= m_inv;
double norm_grad = norm_2(grad_d);
// a well-behaved distance field has |grad d| ~ 1; much larger means a broken field
if(norm_grad < 100.0)
{
grad_d /= norm_grad; //this is the direction of the gradient of the distances
grad_d *= dist_i; //this is the vector with the distance of node_i from the closest point on the free surface
//array_1d<double, TDim> press_grad;
// linear estimate: p(node) ~ press_proj . (vector from free surface to node)
double pestimate = 0.0;
const array_1d<double, 3> & r_press_proj = iii->FastGetSolutionStepValue(PRESS_PROJ);
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pestimate += r_press_proj[l_comp]*grad_d[l_comp];
// press_grad[l_comp]= r_press_proj[l_comp];
iii->FastGetSolutionStepValue(PRESSURE) = pestimate;
}
else
{
// fallback: average the pressure of the LAYER 0 neighbours
std::cout << "attention gradient of distance much greater than 1 on node:" << i_node <<std::endl;
double avg_number = 0.0;
double pavg = 0.0;
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->GetValue(IS_VISITED) == 1.0)
{
pavg += i->FastGetSolutionStepValue(PRESSURE);
avg_number += 1.0;
}
}
if(avg_number == 0)
KRATOS_THROW_ERROR(std::logic_error,"can not happen that the extrapolation node has no neighbours","");
iii->FastGetSolutionStepValue(PRESSURE) = pavg/avg_number;
}
}
//if a node is very close to the free surface (relatively to the element size) fix the pressure on it
// for(ModelPart::NodesContainerType::iterator iii = mr_model_part.NodesBegin(); iii!=mr_model_part.NodesEnd(); iii++)
// {
// unsigned int i_node = iii->FastGetSolutionStepValue(AUX_INDEX);
//
// double dist = mdistances[i_node];
// if(dist > 0.0 && dist < 0.01*mHavg[i_node])
// iii->FastGetSolutionStepValue(PRESSURE) = 0.0;
//
// }
//PREREQUISITES
//allocate memory for variables
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//unknown and right-hand side vector
TSystemVectorType dp, rhs;
dp.resize(n_nodes,false);
rhs.resize(n_nodes,false);
array_1d<double, TDim> dU_i, dU_j, work_array;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
#ifdef _OPENMP
// double time_inv = 0.0; //1.0/delta_t;
//read the pressure projection from the database
#endif
mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes());
mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, rNodes);
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
//for (int i_node = 0; i_node < n_nodes; i_node++)
// std::cout << mvel_n1[i_node] << std::endl;
//loop over all nodes: assemble the Laplacian row and the rhs entry per node
// double rho_inv = 1.0 / mRho;
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i = 0.0;
const double& p_i = mPn1[i_node];
const double& p_old_i = mPn[i_node];
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
// const double& eps_i = mEps[i_node];
array_1d<double, TDim>& xi_i = mXi[i_node];
double l_ii = 0.0; // accumulates the (negative) off-diagonal sum -> diagonal
// double div_i = 0.0;
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mPn1[j_neighbour];
const double& p_old_j = mPn[j_neighbour];
const array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
const array_1d<double, TDim>& xi_j = mXi[j_neighbour];
// const double& eps_j = mEps[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
#ifdef SYMM_PRESS
double edge_tau = 0.25*(mTauPressure[i_node] + mTauPressure[j_neighbour]);
#else
double edge_tau = 0.5*mTauPressure[i_node];
#endif
// double edge_tau = CalculateEdgeTau(time_inv,h_i,a_i,h_j,a_j);
//
// clamp the stabilization time scale from below by the time step
if(edge_tau < delta_t) edge_tau=delta_t;
//compute laplacian operator
double sum_l_ikjk;
edge_ij.CalculateScalarLaplacian(sum_l_ikjk);
// double sum_l_ikjk_onlystab = sum_l_ikjk * (edge_tau);
double sum_l_ikjk_onlydt = sum_l_ikjk * (delta_t);
sum_l_ikjk *= (delta_t + edge_tau);
//assemble right-hand side
//pressure contribution
// rhs_i -= sum_l_ikjk_onlystab * (p_j - p_i);
rhs_i -= sum_l_ikjk * (p_j - p_i);
rhs_i += sum_l_ikjk_onlydt * (p_old_j - p_old_i);
//calculating the divergence of the fract vel
// edge_ij.Sub_D_v(div_i, U_i_curr*mRho*eps_i, U_j_curr * mRho*eps_j);
edge_ij.Sub_D_v(rhs_i, U_i_curr*mRho, U_j_curr * mRho);
// edge_ij.Sub_D_v(rhs_i,a_i*rho_i,a_j*rho_i);
//high order stabilizing term
double temp = 0.0;
// edge_ij.Add_div_v(temp,mTauPressure[i_node]*xi_i,mTauPressure[j_neighbour]*xi_j);
edge_ij.Add_div_v(temp, xi_i, xi_j);
rhs_i += edge_tau * temp;
//assemble laplacian matrix
mL(i_node, j_neighbour) = sum_l_ikjk;
l_ii -= sum_l_ikjk;
}
// //area correction to prevent mass loss
// rhs_i -= mdiv_error[i_node];
// rhs_i += div_i * eps_i;
mL(i_node, i_node) = l_ii;
}
// mass-conservation correction: subtract the divergence error of the previous step
if(muse_mass_correction == true)
{
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i -= mdiv_error[i_node];
}
}
//find the max diagonal term
double max_diag = 0.0;
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double L_diag = mL(i_node, i_node);
if (fabs(L_diag) > fabs(max_diag)) max_diag = L_diag;
}
// NOTE(review): forces the penalty diagonal to be at least 1e20 -- confirm intended magnitude
if(max_diag < 1e20) max_diag=1e20;
//respect pressure boundary conditions by penalization
// double huge = max_diag * 1e6;
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mL(i_node, i_node) = huge;
// rhs[i_node] = 0.0;
// }
// pressure outlets: replace the row by a pure diagonal with zero rhs -> dp = 0
for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
{
unsigned int i_node = mPressureOutletList[i_pressure];
mL(i_node, i_node) = max_diag;
rhs[i_node] = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
mL(i_node, j_neighbour) = 0.0;
}
}
//modification for level_set
// mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
// for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
// {
// if(mdistances[i_dist] >= 0)
// {
// mL(i_dist, i_dist) = huge;
// rhs[i_dist] = 0.0;
// }
// }
// air nodes (distance >= 0): dp = 0; fluid rows drop couplings to air nodes
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
if (mdistances[i_node] >= 0)
{
mL(i_node, i_node) = max_diag;
rhs[i_node] = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
mL(i_node, j_neighbour) = 0.0;
}
}
else
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if (mdistances[j_neighbour] >= 0)
mL(i_node, j_neighbour) = 0.0;
}
}
}
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// if( fabs(mL(i_node, i_node)) < 1e-20)
// {
// mL(i_node, i_node)=max_diag;
// rhs[i_node] = 0.0;
// KRATOS_WATCH("arghhhhhhhhhhhhhhhhhhhhhhhhhhhhhh");
// }
// }
//compute row scaling factors (symmetric diagonal scaling of the system)
TSystemVectorType scaling_factors(n_nodes);
double* Lvalues = mL.value_data().begin();
SizeType* Lrow_indices = mL.index1_data().begin();
SizeType* Lcol_indices = mL.index2_data().begin();
#pragma omp parallel for
for (int k = 0; k < static_cast< int>(mL.size1()); k++)
{
double t = 0.0;
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
// pick the diagonal entry of row k
for (SizeType j=col_begin; j<col_end; j++)
if( static_cast<int>(Lcol_indices[j]) == k)
{
t = fabs(Lvalues[j]);
break;
}
// t += Lvalues[j]*Lvalues[j];
// t = sqrt(t);
// NOTE(review): if a row stores no diagonal (or it is zero) t stays 0 and this
// produces inf -- assumed the CSR pattern always contains a nonzero diagonal
scaling_factors[k] = 1.0/sqrt(t);
}
#pragma omp parallel for
for (int k = 0; k < static_cast<int>(mL.size1()); k++)
{
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
double k_factor = scaling_factors[k];
rhs[k] *= k_factor;
for (SizeType j=col_begin; j<col_end; j++)
{
Lvalues[j] *= scaling_factors[Lcol_indices[j]] * k_factor;
}
}
//set starting vector for iterative solvers
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
dp[i_node] = 0.0;
pLinearSolver->Solve(mL, dp, rhs);
//update pressure (undo the column scaling on the solution)
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
mPn1[i_node] += dp[i_node]*scaling_factors[i_node];
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
// {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mPn1[i_node] = mPressureOutlet[i_pressure];
// }
//write pressure and density to Kratos
mr_matrix_container.WriteScalarToDatabase(PRESSURE, mPn1, rNodes);
//compute pressure proj for the next step
#pragma omp parallel for private(work_array)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& xi_i = mXi[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
xi_i[comp] = 0.0;
double dist = mdistances[i_node];
if (dist <= 0.0) //node is inside domain ---- if outside do nothing
{
const double& p_i = mPn1[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mPn1[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(xi_i, p_i, p_j);
}
const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
xi_i[l_comp] *= m_inv;
}
}
mr_matrix_container.WriteVectorToDatabase(PRESS_PROJ, mXi, rNodes);
KRATOS_CATCH("")
}
//**********************************************************************************
//function to solve fluid equations - fractional step 3: correct fractional momentum
// Fractional step 3: corrects the fractional momentum with the pressure
// increment, U^{n+1} = U* + dt/(M + dt*D) * G(dp)/rho (edge-assembled),
// where M is the lumped mass and D the diagonal (Darcy) stiffness.
// Afterwards it applies the velocity BCs, writes VELOCITY back to the nodal
// database and, if mass correction is active, stores the divergence error
// (rho * div U) used by the next pressure solve.
void SolveStep3()
{
KRATOS_TRY
//get number of nodes
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//define work array
array_1d<double, TDim> correction;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
// with a constant-in-time dp assumption the full increment is applied (factor 1),
// otherwise only half of it
double factor = 0.5;
if(massume_constant_dp == true)
factor = 1.0;
//compute end of step momentum
double rho_inv = 1.0 / mRho;
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv,factor)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist < 0.0) //node is inside domain ---- if outside do nothing
{
array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
double delta_p_i = (mPn1[i_node] - mPn[i_node]) * rho_inv*factor;
// const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
//setting to zero
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
correction[l_comp] = 0.0;
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
double delta_p_j = (mPn1[j_neighbour] - mPn[j_neighbour]) * rho_inv*factor;
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
// edge_ij.Sub_grad_p(correction,delta_p_i,delta_p_j);
edge_ij.Sub_grad_p(correction, delta_p_i, delta_p_j);
// edge_ij.Add_grad_p(correction, delta_p_i, delta_p_j);
// edge_ij.Add_Gp(correction,delta_p_i,delta_p_j);
// edge_ij.Sub_Gp(correction,delta_p_i,delta_p_j);
}
//compute prefactor
// double coefficient = delta_t * m_inv;
const double m = mr_matrix_container.GetLumpedMass() [i_node];
const double& d = mdiag_stiffness[i_node];
//correct fractional momentum: dt/(M + dt*D) accounts for the implicit Darcy term
for (unsigned int comp = 0; comp < TDim; comp++)
{
U_i_curr[comp] += delta_t / (m + delta_t*d) * correction[comp];
}
}
}
ApplyVelocityBC(mvel_n1);
//write velocity of time step n+1 to Kratos
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
//calculate the error on the divergence
if(muse_mass_correction == true)
{
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
const double dist = mdistances[i_node];
double& div_i_err = mdiv_error[i_node];
div_i_err = 0.0;
if (dist < 0.0) //node is inside domain ---- if outside do nothing
{
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_D_v(div_i_err, U_i_curr*mRho, U_j_curr * mRho);
}
}
}
}
KRATOS_CATCH("")
}
//************************************
// Enforces the velocity boundary conditions on the given nodal velocity array:
// - corner-edge and corner constraints (only when the wall law is inactive),
// - slip condition (removal of the wall-normal velocity component),
// - fixed-velocity (Dirichlet) conditions.
// Only nodes inside the fluid (distance <= 0) are modified, except corner
// nodes, which are always zeroed when the wall law is off.
void ApplyVelocityBC(CalcVectorType& VelArray)
{
    KRATOS_TRY

    if (mWallLawIsActive == false)
    {
        // Corner edges: keep only the velocity component along the edge direction.
        int edge_size = medge_nodes_direction.size();
        #pragma omp parallel for firstprivate(edge_size)
        for (int i = 0; i < edge_size; i++)
        {
            int node_index = medge_nodes[i];
            const array_1d<double, TDim>& edge_dir = medge_nodes_direction[i];
            if (mdistances[node_index] <= 0.0)
            {
                array_1d<double, TDim>& v = VelArray[node_index];
                double along_edge = 0.0;
                for (unsigned int comp = 0; comp < TDim; comp++)
                    along_edge += v[comp] * edge_dir[comp];
                for (unsigned int comp = 0; comp < TDim; comp++)
                    v[comp] = edge_dir[comp] * along_edge;
            }
        }

        // Corner nodes: the velocity is fully constrained to zero.
        int corner_size = mcorner_nodes.size();
        for (int i = 0; i < corner_size; i++)
        {
            array_1d<double, TDim>& v = VelArray[mcorner_nodes[i]];
            for (unsigned int comp = 0; comp < TDim; comp++)
                v[comp] = 0.0;
        }
    }

    // Slip condition: subtract the projection of the velocity onto the
    // (non-normalized) wall normal, leaving only the tangential part.
    int slip_size = mSlipBoundaryList.size();
    #pragma omp parallel for firstprivate(slip_size)
    for (int i_slip = 0; i_slip < slip_size; i_slip++)
    {
        unsigned int node_index = mSlipBoundaryList[i_slip];
        if (mdistances[node_index] <= 0.0)
        {
            array_1d<double, TDim>& v = VelArray[node_index];
            array_1d<double, TDim>& wall_normal = mSlipNormal[node_index];
            double v_dot_n = 0.0;
            double n_dot_n = 0.0;
            for (unsigned int comp = 0; comp < TDim; comp++)
            {
                v_dot_n += v[comp] * wall_normal[comp];
                n_dot_n += wall_normal[comp] * wall_normal[comp];
            }
            v_dot_n /= n_dot_n;
            for (unsigned int comp = 0; comp < TDim; comp++)
                v[comp] -= v_dot_n * wall_normal[comp];
        }
    }

    // Fixed-velocity condition: impose the prescribed nodal values.
    int fixed_size = mFixedVelocities.size();
    #pragma omp parallel for firstprivate(fixed_size)
    for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
    {
        unsigned int node_index = mFixedVelocities[i_velocity];
        if (mdistances[node_index] <= 0.0)
        {
            const array_1d<double, TDim>& prescribed = mFixedVelocitiesValues[i_velocity];
            array_1d<double, TDim>& v = VelArray[node_index];
            for (unsigned int comp = 0; comp < TDim; comp++)
                v[comp] = prescribed[comp];
        }
    }

    KRATOS_CATCH("")
}
//********************************
//function to compute coefficients
// Extrapolates VELOCITY, PRESSURE (previous step) and PRESS_PROJ from the
// fluid domain onto `extrapolation_layers` layers of outside nodes next to
// the free surface, layer by layer, by averaging the values of neighbours
// belonging to lower (already known) layers. Outside nodes not adjacent to
// the interface get their solution reset to zero. Finally it marks with
// IS_VISITED the nodes on which convection will be solved and re-applies the
// velocity BCs.
//
//   extrapolation_layers : number of outside layers to fill (>= 1)
void ExtrapolateValues(unsigned int extrapolation_layers)
{
KRATOS_TRY
//ensure that corner nodes are wet if all of the nodes around them have a negative distance
typedef Node < 3 > PointType;
typedef GlobalPointersVector<PointType > PointVector;
typedef PointVector::iterator PointIterator;
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances,mr_model_part.Nodes());
// mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi,mr_model_part.Nodes());
//
// //loop on all the slip nodes and Set the pressure projection to -BodyForce if it has neighbours with distance greater than 0
// int slip_size = mSlipBoundaryList.size();
// #pragma omp parallel for firstprivate(slip_size)
// for (int i_slip = 0; i_slip < slip_size; i_slip++)
// {
// unsigned int i_node = mSlipBoundaryList[i_slip];
// double dist = mdistances[i_node];
//
//
// if(dist <= 0.0)
// {
// int nout = 0;
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// //get global index of neighbouring node j
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// const double& dist_j = mdistances[j_neighbour];
//
// if(dist_j > 0)
// nout++;
// }
//
// if(nout > 0) mXi[i_node] += mRho*mBodyForce;
// }
// }
//
// mr_matrix_container.WriteVectorToDatabase(PRESS_PROJ, mXi,mr_model_part.Nodes());
//reset is visited flag
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0.0;
}
//generate a container with the layers to be extrapolated
std::vector< PointVector > layers(extrapolation_layers);
//detect the nodes inside the fluid surface (LAYER 0 = interface fluid nodes)
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if (inode->FastGetSolutionStepValue(DISTANCE) < 0.0) //candidates are only the ones inside the fluid domain
{
GlobalPointersVector< Node < 3 > >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->FastGetSolutionStepValue(DISTANCE) >= 0.0) //add the node as free surface if one of its neighb is outside
{
if (inode->GetValue(IS_VISITED) == 0.0)
{
layers[0].push_back(*(inode.base()));
inode->GetValue(IS_VISITED) = 1.0;
}
}
}
}
else
{
//set everything to zero (current and previous step) on the outside node
noalias(inode->FastGetSolutionStepValue(VELOCITY)) = ZeroVector(3);
inode->FastGetSolutionStepValue(PRESSURE) = 0.0;
noalias(inode->FastGetSolutionStepValue(VELOCITY, 1)) = ZeroVector(3);
inode->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
noalias(inode->FastGetSolutionStepValue(PRESS_PROJ)) = ZeroVector(3);
noalias(inode->FastGetSolutionStepValue(PRESS_PROJ, 1)) = ZeroVector(3);
}
}
//fill the following layers by neighbour relationships
//each layer fills the following (IS_VISITED stores layer index + 1)
for (unsigned int il = 0; il < extrapolation_layers - 1; il++)
{
for (PointIterator iii = (layers[il]).begin(); iii != (layers[il]).end(); iii++)
{
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator jjj = neighb_nodes.begin(); jjj != neighb_nodes.end(); jjj++) //destination = origin1 + value * Minv*origin
{
if (jjj->FastGetSolutionStepValue(DISTANCE) >= 0 &&
jjj->GetValue(IS_VISITED) == 0.0)
{
layers[il + 1].push_back(Node<3>::WeakPointer(*jjj.base()));
jjj->GetValue(IS_VISITED) = double(il + 2.0);
}
}
}
}
array_1d<double, 3 > aux, aux_proj;
//TESTING!!!
//fill the pressure projection on the first layer inside the fluid
//by extrapolating from the pressure projection on the layer -1 (the first layer completely inside the domain)
for (PointIterator iii = (layers[0]).begin(); iii != (layers[0]).end(); iii++)
{
noalias(aux_proj) = ZeroVector(3);
double avg_number = 0.0;
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->GetValue(IS_VISITED) == 0.0) //the node will be considered for extrapolation only if completely inside
{
const array_1d<double, 3 > & inside_press_grad = i->FastGetSolutionStepValue(PRESS_PROJ);
noalias(aux_proj) += inside_press_grad;
avg_number += 1.0;
}
}
if (avg_number != 0.0) //this case means that it has some neighbours that are completely internal
{
aux_proj /= avg_number;
noalias(iii->FastGetSolutionStepValue(PRESS_PROJ)) = aux_proj;
}
else //case in which there is not a layer of nodes completely internal
{
// fall back to the hydrostatic gradient rho * g
array_1d<double,3>& pproj = iii->FastGetSolutionStepValue(PRESS_PROJ);
for(unsigned int i=0; i<TDim; i++)
pproj[i] = mRho*mBodyForce[i];
// noalias(iii->FastGetSolutionStepValue(PRESS_PROJ)) = mRho*mBodyForce;
}
}
//perform extrapolation layer by layer by making an average
//of the neighbours of lower order
for (unsigned int il = 1; il < extrapolation_layers; il++)
{
// std::cout << "layer " << il << std::endl;
for (PointIterator iii = layers[il].begin(); iii != layers[il].end(); iii++)
{
// std::cout << iii->Id() << " " << std::endl;
const array_1d<double, 3 > & coords_top = iii->Coordinates();
//extrapolate the average velocity
noalias(aux) = ZeroVector(3);
noalias(aux_proj) = ZeroVector(3);
double avg_number = 0.0;
double pavg = 0.0;
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
// only use neighbours from strictly lower, already-filled layers
if (i->GetValue(IS_VISITED) < (il + 1) && i->GetValue(IS_VISITED) != 0.0)
{
const array_1d<double, 3 > & coords_bottom = i->Coordinates();
array_1d<double, 3 > direction_vec = coords_top;
noalias(direction_vec) -= coords_bottom;
const array_1d<double, 3 > & press_grad = i->FastGetSolutionStepValue(PRESS_PROJ);
// linear pressure extrapolation along the edge from the known neighbour
double temp = inner_prod(direction_vec, press_grad);
double pestimate = i->FastGetSolutionStepValue(PRESSURE,1) + temp;
pavg += pestimate;
noalias(aux_proj) += press_grad;
noalias(aux) += i->FastGetSolutionStepValue(VELOCITY);
avg_number += 1.0;
}
}
if (avg_number != 0.0)
{
aux /= avg_number;
pavg /= avg_number;
aux_proj /= avg_number;
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "error in extrapolation:: no neighbours find on a extrapolation layer -- impossible", "");
// KRATOS_THROW_ERROR(std:logic_error,"error in extrapolation:: no neighbours find on a extrapolation layer -- impossible","");
}
noalias(iii->FastGetSolutionStepValue(VELOCITY)) = aux;
noalias(iii->FastGetSolutionStepValue(VELOCITY, 1)) = aux;
iii->FastGetSolutionStepValue(PRESSURE, 1) = pavg;
noalias(iii->FastGetSolutionStepValue(PRESS_PROJ)) = aux_proj;
noalias(iii->FastGetSolutionStepValue(PRESS_PROJ, 1)) = aux_proj;
}
}
mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, mr_model_part.Nodes());
// //on the first layer outside the pressure is set to a value such that on the free surface the pressure is approx 0
// for (PointIterator iii = layers[1].begin(); iii != layers[1].end(); iii++)
// {
// //get the node
// unsigned int i_node = iii->FastGetSolutionStepValue(AUX_INDEX);
//
// array_1d<double, TDim> grad_d;
// for (unsigned int comp = 0; comp < TDim; comp++)
// grad_d[comp] = 0.0;
//
// double dist_i = mdistances[i_node];
//
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// //get global index of neighbouring node j
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//
// const double& dist_j = mdistances[j_neighbour];
//
// //projection of pressure gradients
// CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
//
// edge_ij.Add_grad_p(grad_d, dist_i, dist_j);
// }
//
// const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// grad_d[l_comp] *= m_inv;
//
// double norm_grad = norm_2(grad_d);
//
// if(norm_grad < 100.0)
// {
// grad_d /= norm_grad; //this is the direction of the gradient of the distances
//
// grad_d *= dist_i; //this is the vector with the distance of node_i from the closest point on the free surface
//
// const array_1d<double, TDim> press_grad = iii->FastGetSolutionStepValue(PRESS_PROJ);
// double pestimate = inner_prod(press_grad,grad_d);
//
// iii->FastGetSolutionStepValue(PRESSURE) = pestimate;
// }
// else
// {
// std::cout << "attention gradient of distance much greater than 1 on node:" << i_node <<std::endl;
// double avg_number = 0.0;
//
// double pavg = 0.0;
//
// GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
// for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
// {
// if (i->GetValue(IS_VISITED) == 1) {
// pavg += i->FastGetSolutionStepValue(PRESSURE);
// avg_number += 1.0;
// }
// }
//
// if(avg_number == 0)
// KRATOS_THROW_ERROR(std::logic_error,"can not happen that the extrapolation node has no neighbours","");
//
// iii->FastGetSolutionStepValue(PRESSURE) = pavg/avg_number;
//
// }
//
// }
//
//
// //set the pressure to zero on the outer layers (>2)
// for (unsigned int il = 2; il < extrapolation_layers; il++)
// {
// for (PointIterator iii = layers[il].begin(); iii != layers[il].end(); iii++)
//
// {
// iii->FastGetSolutionStepValue(PRESSURE) = 0.0;
// }
// }
//mark nodes on which we will have to solve for convection
//mark all of internal nodes
ModelPart::NodesContainerType::iterator it_begin = mr_model_part.NodesBegin();
for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++)
{
ModelPart::NodesContainerType::iterator it = it_begin+i_node;
if(it->FastGetSolutionStepValue(DISTANCE) <= 0.0)
it->GetValue(IS_VISITED) = 1.0;
else
it->GetValue(IS_VISITED) = 0.0;
}
//now mark all of the nodes up to the extrapolation layers - 1
for (unsigned int il = 0; il < extrapolation_layers-1; il++)
for (PointIterator iii = layers[il].begin(); iii != layers[il].end(); iii++)
iii->GetValue(IS_VISITED) = 1.0;
// re-apply the velocity boundary conditions on the (possibly extrapolated) field
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
ApplyVelocityBC(mvel_n1);
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
KRATOS_CATCH("")
}
void ChangeSignToDistance()
{
    KRATOS_TRY

    // Flip the sign of the level-set function DISTANCE on every node of the model part.
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
         inode != mr_model_part.NodesEnd(); inode++)
    {
        inode->FastGetSolutionStepValue(DISTANCE) = -inode->FastGetSolutionStepValue(DISTANCE);
    }

    KRATOS_CATCH("")
}
void MarkNodesByDistance(double min, double max)
{
    KRATOS_TRY

    // Set IS_VISITED = 1 on nodes whose DISTANCE lies strictly inside the open
    // interval (min, max); clear the flag (= 0) on all other nodes.
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
         inode != mr_model_part.NodesEnd(); inode++)
    {
        const double d = inode->FastGetSolutionStepValue(DISTANCE);
        inode->GetValue(IS_VISITED) = (d > min && d < max) ? 1.0 : 0.0;
    }

    KRATOS_CATCH("")
}
void SaveScalarVariableToOldStep(Variable<double>& rVar)
{
    KRATOS_TRY

    // Copy the current value of rVar into the previous-step buffer slot
    // (index 1) on every node.
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
         inode != mr_model_part.NodesEnd(); inode++)
    {
        inode->FastGetSolutionStepValue(rVar, 1) = inode->FastGetSolutionStepValue(rVar);
    }

    KRATOS_CATCH("")
}
void MarkExternalAndMixedNodes()
{
    KRATOS_TRY

    // First clear the visit flag everywhere ...
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
         inode != mr_model_part.NodesEnd(); inode++)
    {
        inode->GetValue(IS_VISITED) = 0.0;
    }

    // ... then flag every external node (DISTANCE > 0) together with all of
    // its neighbours, so that mixed (interface) nodes get marked as well.
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
         inode != mr_model_part.NodesEnd(); inode++)
    {
        if (inode->FastGetSolutionStepValue(DISTANCE) > 0.0)
        {
            inode->GetValue(IS_VISITED) = 1.0;
            GlobalPointersVector< Node < 3 > >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
            for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
                i->GetValue(IS_VISITED) = 1.0;
        }
    }

    KRATOS_CATCH("")
}
void MarkInternalAndMixedNodes()
{
    KRATOS_TRY

    // First clear the visit flag everywhere ...
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
         inode != mr_model_part.NodesEnd(); inode++)
    {
        inode->GetValue(IS_VISITED) = 0.0;
    }

    // ... then flag every internal (fluid) node (DISTANCE <= 0) together with
    // all of its neighbours, so that mixed (interface) nodes get marked too.
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
         inode != mr_model_part.NodesEnd(); inode++)
    {
        if (inode->FastGetSolutionStepValue(DISTANCE) <= 0.0)
        {
            inode->GetValue(IS_VISITED) = 1.0;
            GlobalPointersVector< Node < 3 > >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
            for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
                i->GetValue(IS_VISITED) = 1.0;
        }
    }

    KRATOS_CATCH("")
}
void MarkInternalNodes()
{
    KRATOS_TRY

    // Reset the flag on all nodes, then set IS_VISITED = 1 only on the nodes
    // inside the fluid (DISTANCE <= 0). Neighbours are NOT marked here.
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
         inode != mr_model_part.NodesEnd(); inode++)
    {
        inode->GetValue(IS_VISITED) = 0.0;
    }

    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
         inode != mr_model_part.NodesEnd(); inode++)
    {
        if (inode->FastGetSolutionStepValue(DISTANCE) <= 0.0)
            inode->GetValue(IS_VISITED) = 1.0;
    }

    KRATOS_CATCH("")
}
//**************************************
//function to calculate the area normals
//computes per-face area normals, accumulates them onto the boundary nodes and
//splits the nodes into slip (structure) and inlet/outlet boundary lists
void CalculateNormals(ModelPart::ConditionsContainerType& rConditions)
{
KRATOS_TRY
//calculate area normals face-by-face
array_1d<double, 3 > area_normal;
//2D case
if (TDim == 2)
{
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal2D(cond_it, area_normal);
}//3D case
else if (TDim == 3)
{
//help vectors for cross product
array_1d<double, 3 > v1;
array_1d<double, 3 > v2;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal3D(cond_it, area_normal, v1, v2);
}
//(re)initialize nodal normals
//NOTE(review): mis_slip is indexed up to n_nodes below but not resized here;
//presumably it is sized during initialization elsewhere - confirm.
unsigned int n_nodes = mNodalFlag.size();
mInOutNormal.resize(n_nodes);
mSlipNormal.resize(n_nodes);
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
noalias(mSlipNormal[i_node]) = ZeroVector(TDim);
mis_slip[i_node] = false;
noalias(mInOutNormal[i_node]) = ZeroVector(TDim);
}
//loop over all faces: each face distributes 1/TDim of its normal to each of its nodes
const double node_factor = 1.0 / TDim;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
//slip condition: accumulate contributions of structure faces on their nodes
if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == true)
for (unsigned int if_node = 0; if_node < TDim; if_node++)
{
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
array_1d<double, TDim>& slip_normal = mSlipNormal[i_node];
mis_slip[i_node] = true;
for (unsigned int comp = 0; comp < TDim; comp++)
{
slip_normal[comp] += node_factor * face_normal[comp];
}
}
}
//fill the list of slip nodes
std::vector< unsigned int> tempmSlipBoundaryList;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (mis_slip[i_node] == true)
tempmSlipBoundaryList.push_back(i_node);
mis_slip[i_node] = false; //reset so the flag can be reused for the inlet/outlet pass below
}
mSlipBoundaryList.resize(tempmSlipBoundaryList.size(),false);
#pragma omp parallel for
for(int i=0; i<static_cast<int>(tempmSlipBoundaryList.size()); i++)
mSlipBoundaryList[i] = tempmSlipBoundaryList[i];
//loop over all faces to fill inlet outlet
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
//a face counts as inlet/outlet if it is not a structure face,
//or if any of its nodes has a fixed x-velocity
bool is_inlet_or_outlet = false;
if (cond_it->GetValue (IS_STRUCTURE) != true) is_inlet_or_outlet = true;
else
{
for (unsigned int if_node = 0; if_node < TDim; if_node++)
if (face_geometry[if_node].IsFixed (VELOCITY_X) )
is_inlet_or_outlet = true;
}
//accumulate inlet/outlet normals (complement of the slip pass above)
if (is_inlet_or_outlet) //the opposite of the loop before
for (unsigned int if_node = 0; if_node < TDim; if_node++)
{
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
array_1d<double, TDim>& inout_normal = mInOutNormal[i_node];
mis_slip[i_node] = true; //reutilize it!
for (unsigned int comp = 0; comp < TDim; comp++)
{
inout_normal[comp] += node_factor * face_normal[comp];
}
}
}
//fill the list of inlet/outlet nodes
std::vector< unsigned int> tempmInOutBoundaryList;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (mis_slip[i_node] == true)
tempmInOutBoundaryList.push_back(i_node);
}
mInOutBoundaryList.resize(tempmInOutBoundaryList.size(),false);
#pragma omp parallel for
for(int i=0; i<static_cast<int>(tempmInOutBoundaryList.size()); i++)
mInOutBoundaryList[i] = tempmInOutBoundaryList[i];
KRATOS_CATCH("")
}
//*******************************
//function to free dynamic memory
void Clear()
{
KRATOS_TRY
mViscosity.clear();
mWork.clear();
mvel_n.clear();
mvel_n1.clear();
mPn.clear();
mPn1.clear();
mHmin.clear();
mHavg.clear();
mSlipNormal.clear();
mNodalFlag.clear();
mFixedVelocities.clear();
mFixedVelocitiesValues.clear();
mPressureOutletList.clear();
// mPressureOutlet.clear();
mSlipBoundaryList.clear();
mL.clear();
mTauPressure.clear();
mTauConvection.clear();
mTau2.clear();
mBeta.clear();
mPiConvection.clear();
mphi_n.clear();
mphi_n1.clear();
mEps.clear();
mA.clear();
mB.clear();
mStrVel.clear();
mdiv_error.clear();
mdiag_stiffness.clear();
mis_slip.clear();
KRATOS_CATCH ("")
}
void ConvectDistance()
{
KRATOS_TRY
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
ValuesVectorType rhs, WorkConvection;
rhs.resize(n_nodes);
WorkConvection.resize(n_nodes);
ValuesVectorType active_nodes;
active_nodes.resize(n_nodes);
mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes());
//read variables from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
mr_matrix_container.FillOldScalarFromDatabase(DISTANCE, mphi_n, mr_model_part.Nodes());
//mr_matrix_container.AssignVectorToVector(mphi_n1, mphi_n); //mWork = mphi_n
// //chapuza
// //set the distance to zero when it tries to go out of the pressure boundary
// int pressure_size = mPressureOutletList.size();
// #pragma omp parallel for firstprivate(pressure_size)
// for (int iii = 0; iii < pressure_size; iii++)
// {
// unsigned int i_node = mPressureOutletList[iii];
// mphi_n1[i_node] = fabs(mphi_n1[i_node]);
// mphi_n[i_node] = fabs(mphi_n[i_node]);
// }
//create and fill a vector of nodes for which we want to convect the velocity
for (int i_node = 0; i_node < n_nodes; i_node++)
{
ModelPart::NodesContainerType::iterator it_begin = mr_model_part.NodesBegin();
active_nodes[i_node] = (it_begin + i_node)->GetValue(IS_VISITED);
}
// //calculating the convective projection
// array_1d<double, TDim> a_i;
// array_1d<double, TDim> a_j;
// #pragma omp parallel for private(a_i,a_j)
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// double& pi_i = mPiConvection[i_node];
// const double& phi_i = mphi_n1[i_node];
// //set to zero the projection
// pi_i = 0.0;
// if (active_nodes[i_node] != 0.0)
// {
// a_i = mvel_n1[i_node];
// a_i /= mEps[i_node];
//
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//
// if (active_nodes[j_neighbour] != 0.0)
// {
// noalias(a_j) = mvel_n1[j_neighbour];
// a_j /= mEps[j_neighbour];
//
// const double& phi_j = mphi_n1[j_neighbour];
// CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
// edge_ij.Add_ConvectiveContribution(pi_i, a_i, phi_i, a_j, phi_j);
// }
// }
// //apply inverted mass matrix
// const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
// pi_i *= m_inv;
// }
// }
//calculating the convective projection
array_1d<double, TDim> a_i;
array_1d<double, TDim> a_j;
#pragma omp parallel for private(a_i,a_j)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& pi_i = mPiConvection[i_node];
// setting to zero the projection
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] = 0.0;
/* if (active_nodes[i_node] != 0.0)
{*/
const double& phi_i = mphi_n1[i_node];
noalias(a_i) = mvel_n1[i_node];
a_i /= mEps[i_node];
// loop to all the edges surrounding node I
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
noalias(a_j) = mvel_n1[j_neighbour];
a_j /= mEps[j_neighbour];
const double& phi_j = mphi_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(pi_i, phi_i, phi_j);
}
// apply inverted mass matrix
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] *= m_inv;
// }
}
//calculating limitor
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
const array_1d<double, TDim>& pi_i = mPiConvection[i_node];
const double& p_i = mphi_n1[i_node];
double& beta_i = mBeta[i_node];
beta_i = 0.0;
double n = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mphi_n1[j_neighbour];
const array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
const array_1d<double, TDim>& pi_j = mPiConvection[j_neighbour];
// double proj = 0.0;
// for (unsigned int comp = 0; comp < TDim; comp++)
// proj += 0.5*l_k[comp]*(pi_i[comp]+pi_j[comp]);
// double beta = fabs((p_i - p_j - proj)/(fabs(p_i-p_j)+fabs(proj)+1e-4));
double proj = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
proj += 0.5 * l_k[comp]*(pi_i[comp] + pi_j[comp]);
// proj += dir[comp]*pi_i[comp];
double numerator = fabs(fabs(p_j - p_i) - fabs(proj));
double denom = fabs(fabs(p_j - p_i) + 1e-6);
beta_i += numerator / denom;
n += 1.0;
}
beta_i /= n;
if (beta_i > 1.0)
beta_i = 1.0;
}
// mr_matrix_container.WriteScalarToDatabase(TEMPERATURE, active_nodes, rNodes);
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
mr_matrix_container.AssignVectorToVector(mphi_n, WorkConvection); //mWork = mphi_n
//first step of Runge Kutta
// mr_matrix_container.AssignVectorToVector(mphi_n,mphi_n1); //mphi_n1 = mphi_n
mr_matrix_container.SetToZero(rhs);
CalculateRHS_convection(mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value(WorkConvection, WorkConvection, delta_t / 6.0, mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, 0.5 * delta_t, mr_matrix_container.GetInvertedMass(), rhs);
//second step
mr_matrix_container.SetToZero(rhs);
CalculateRHS_convection(mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value(WorkConvection, WorkConvection, delta_t / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, 0.5 * delta_t, mr_matrix_container.GetInvertedMass(), rhs);
//third step
mr_matrix_container.SetToZero(rhs);
CalculateRHS_convection(mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value(WorkConvection, WorkConvection, delta_t / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, delta_t, mr_matrix_container.GetInvertedMass(), rhs);
//fourth step
mr_matrix_container.SetToZero(rhs);
CalculateRHS_convection(mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value(WorkConvection, WorkConvection, delta_t / 6.0, mr_matrix_container.GetInvertedMass(), rhs);
//compute right-hand side
mr_matrix_container.AssignVectorToVector(WorkConvection, mphi_n1);
// // make sure that boundary nodes that are very close to the free surface get wet
// int slip_size = mSlipBoundaryList.size();
// #pragma omp parallel for firstprivate(slip_size)
// for (int i_slip = 0; i_slip < slip_size; i_slip++) {
// unsigned int i_node = mSlipBoundaryList[i_slip];
// const double& h_i = mHmin[i_node];
// double& dist_i = mphi_n1[i_node];
//
// if(dist_i > 0.0 && dist_i < 0.5*h_i)
// {
// //loop to all the edges surrounding node I
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(mphi_n1[j_neighbour] <= 0.0)
// dist_i = -0.01 * h_i;
// }
// }
//
// }
// int fixed_size = mFixedVelocities.size();
// #pragma omp parallel for firstprivate(fixed_size)
// for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++) {
// unsigned int i_node = mFixedVelocities[i_velocity];
// const double& h_i = mHmin[i_node];
// double& dist_i = mphi_n1[i_node];
//
// if(dist_i > 0.0 && dist_i < 0.5*h_i)
// {
// //loop to all the edges surrounding node I
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(mphi_n1[j_neighbour] <= 0.0)
// dist_i = -0.01 * h_i;
// }
// }
// }
//wetten corner nodes if needed
int corner_size = mcorner_nodes.size();
for (int i = 0; i < corner_size; i++)
{
int i_node = mcorner_nodes[i];
bool to_be_wettened = true;
double min_dist = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
double neighb_dist = mphi_n1[j_neighbour];
if(min_dist > neighb_dist)
min_dist = neighb_dist;
if(neighb_dist >= 0.0)
{
to_be_wettened=false;
}
}
if(to_be_wettened==true)
mphi_n1[i_node] = min_dist;
}
mr_matrix_container.WriteScalarToDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
KRATOS_CATCH("")
}
void ReduceTimeStep(ModelPart& rModelPart, double NewTime)
{
    KRATOS_TRY

    // Roll the nodal database back one step (the previous step's values
    // overwrite the current ones) and restart the step at the reduced time,
    // letting the process info recompute DELTA_TIME consistently.
    rModelPart.OverwriteSolutionStepData(1, 0);
    rModelPart.GetProcessInfo().SetCurrentTime(NewTime);

    KRATOS_CATCH("error in reducing the time step")
}
// Verify the outcome of the distance convection: inside the fluid
// (distance <= 0) the nodal gradient of the distance field is reconstructed
// edge-by-edge; a gradient norm above 1.5 indicates a failed convection.
// Returns true when no such pathological gradient is found.
bool CheckDistanceConvection()
{
    int n_large_distance_gradient = 0;
    array_1d<double, TDim> grad_d;
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();

    //count the nodes with a suspiciously large distance gradient
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        const double dist_i = mdistances[i_node];
        if (dist_i <= 0.0)
        {
            for (unsigned int comp = 0; comp < TDim; comp++)
                grad_d[comp] = 0.0;
            //assemble the nodal gradient from the surrounding edges
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                //get global index of neighbouring node j
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                const double& dist_j = mdistances[j_neighbour];
                CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
                edge_ij.Add_grad_p(grad_d, dist_i, dist_j);
            }
            //apply the inverted (lumped) mass matrix
            const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                grad_d[l_comp] *= m_inv;

            if (norm_2(grad_d) > 1.5) //large gradient found
                n_large_distance_gradient += 1;
        }
    }

    //success only if no pathological gradients were detected
    return n_large_distance_gradient == 0;
}
// Switch on the wall-law treatment, storing the wall parameter to be used.
void ActivateWallResistance(double Ywall)
{
    mY_wall = Ywall;
    mWallLawIsActive = true;
}
// Integrate the normal velocity over the wetted inlet/outlet boundary nodes
// and scale by the time step: an estimate of the net fluid volume exchanged
// through the inlet/outlet in the current step.
double ComputeVolumeVariation()
{
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    const double dt = CurrentProcessInfo[DELTA_TIME];

    double flux = 0.0;
    const int inout_size = mInOutBoundaryList.size();
    for (int i = 0; i < inout_size; i++)
    {
        const unsigned int i_node = mInOutBoundaryList[i];
        if (mdistances[i_node] <= 0.0) //only wetted boundary nodes contribute
        {
            const array_1d<double, TDim>& U_i = mvel_n1[i_node];
            const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
            double projected = 0.0;
            for (unsigned int comp = 0; comp < TDim; comp++)
                projected += U_i[comp] * an_i[comp];
            flux += projected;
        }
    }
    return flux * dt;
}
// Sum the lumped nodal masses (1 / inverted mass) of all nodes inside the
// fluid (distance <= 0): the discrete wet volume of the domain.
double ComputeWetVolume()
{
    KRATOS_TRY

    mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());

    double wet_volume = 0.0;
    const int n = static_cast<int>(mdistances.size());
    for (int i = 0; i < n; i++)
    {
        if (mdistances[i] <= 0.0)
            wet_volume += 1.0 / mr_matrix_container.GetInvertedMass()[i];
    }
    return wet_volume;

    KRATOS_CATCH("");
}
// If the measured wet volume is below the expected one, wet the first layer
// of nodes just outside the free surface (assigning them a small negative
// distance) until approximately the missing volume is recovered.
void DiscreteVolumeCorrection(double expected_volume, double measured_volume)
{
    double volume_error = expected_volume - measured_volume;
    if (measured_volume < expected_volume)
    {
        double layer_volume = 0.0;
        std::vector<unsigned int> first_outside;
        int n_nodes = mdistances.size();

        //find the first layer of dry nodes (dist > 0 with at least one wet neighbour)
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double dist = mdistances[i_node];
            if (dist > 0.0) //node is outside domain
            {
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    if (mdistances[j_neighbour] <= 0.0)
                    {
                        const double nodal_mass = 1.0 / mr_matrix_container.GetInvertedMass()[i_node];
                        //accept the node only while its mass still fits the residual error
                        if (nodal_mass < volume_error - layer_volume)
                        {
                            first_outside.push_back(i_node);
                            layer_volume += nodal_mass;
                        }
                        //BUGFIX: stop scanning this node's neighbours after the first
                        //wet one - previously a node with several wet neighbours was
                        //pushed (and its mass counted) once per wet neighbour
                        break;
                    }
                }
            }
        }

        //mark the selected nodes with a small negative distance
        for (unsigned int i = 0; i < first_outside.size(); i++)
        {
            unsigned int i_node = first_outside[i];
            mdistances[i_node] = -mHavg[i_node];
        }
    }
    mr_matrix_container.WriteScalarToDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
}
// Push the free surface outwards by one layer: every dry node (dist > 0)
// touching at least one wet node is given a small negative distance.
// NOTE: the scan mutates mdistances in place, so nodes wetted earlier in the
// loop count as wet neighbours for later nodes (same as the original code).
void PushFreeSurface()
{
    int n_nodes = mdistances.size();
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double dist = mdistances[i_node];
        if (dist > 0.0) //node is outside domain
        {
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                if (mdistances[j_neighbour] <= 0.0)
                {
                    //mark the node with a small negative distance; one wet
                    //neighbour is enough (the original re-assigned the same
                    //value once per wet neighbour)
                    mdistances[i_node] = -mHavg[i_node];
                    break;
                }
            }
        }
    }
    mr_matrix_container.WriteScalarToDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
}
//***************************************
//function to set adequate time step size
// Estimates the largest stable time step from the CFL condition
//   dt_i = CFL / (2*|v|/h + 4*nu/h^2)
// evaluated per node (h = hmin) and per edge (relative velocity), and tracks
// the analogous average-edge-length estimate in mdelta_t_avg. Velocities that
// would drive dt below 10e-8 are scaled down (clipped); if any clipping
// happened the modified velocities are written back to the database.
double ComputeBoundedTimeStep(const double CFLNumber, const double MaxDt)
{
    KRATOS_TRY

    //save the maximum time step
    max_dt = MaxDt;

    //local variables for the minimum time step sizes
    double delta_t = 1e10;
    mdelta_t_avg = 1e10;

    //getting the current velocity, viscosity and porosity/Darcy data
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes());
    mr_matrix_container.FillVectorFromDatabase(STRUCTURE_VELOCITY, mStrVel, mr_model_part.Nodes());

    //*******************
    //loop over all nodes
    const std::size_t n_nodes = mvel_n1.size(); //BUGFIX: was declared as double
    for (std::size_t i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& v_i = mvel_n1[i_node];
        const double havg_i = mHavg[i_node];
        const double hmin_i = mHmin[i_node];
        const double eps_i = mEps[i_node];
        const double nu_i = mViscosity[i_node];

        //norm of the nodal velocity
        double vel_norm = 0.0;
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            vel_norm += v_i[l_comp] * v_i[l_comp];
        vel_norm = sqrt(vel_norm);

        //scale by porosity
        vel_norm /= eps_i;

        //CFL estimates based on the minimum and average edge lengths
        double delta_t_i = CFLNumber * 1.0 / (2.0 * vel_norm / hmin_i + 4.0 * nu_i / (hmin_i * hmin_i));
        double delta_t_i_avg = 1.0 / (2.0 * vel_norm / havg_i + 4.0 * nu_i / (havg_i * havg_i));

        //bound delta_t_i to 10e-08 by reducing the velocity
        if (delta_t_i < 10e-8)
        {
            v_i *= delta_t_i / 10e-8;
            delta_t_i = 10e-8;
        }
        //same bounding for the average-edge estimate
        if (delta_t_i_avg < 10e-8)
        {
            v_i *= delta_t_i_avg / 10e-8;
            delta_t_i_avg = 10e-8;
        }

        //consider the most restrictive case of neighbouring velocities with
        //similar direction but opposite sense: use the relative velocity
        //along every edge surrounding node i
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            //get global index of neighbouring node j
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            array_1d<double, TDim>& v_j = mvel_n1[j_neighbour];

            double v_diff_norm = 0.0;
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            {
                double temp = v_i[l_comp] - v_j[l_comp];
                v_diff_norm += temp * temp;
            }
            v_diff_norm = sqrt(v_diff_norm);
            v_diff_norm /= eps_i;

            double delta_t_j = CFLNumber * 1.0 / (2.0 * v_diff_norm / hmin_i + 4.0 * nu_i / (hmin_i * hmin_i));
            if (delta_t_j < 10e-8) //bound as above, reducing the neighbour velocity
            {
                v_j *= delta_t_j / 10e-8;
                delta_t_j = 10e-8;
            }
            if (delta_t_j < delta_t_i)
                delta_t_i = delta_t_j;
        }

        //keep the overall minima
        if (delta_t_i < delta_t)
            delta_t = delta_t_i;
        if (delta_t_i_avg < mdelta_t_avg)
            mdelta_t_avg = delta_t_i_avg;
    }
    //*******************
    //perform MPI syncronization of the dt (minimum should be kept)

    //if any velocity was clipped above, delta_t was bounded at exactly
    //10e-8 (= 1e-7), so write the modified velocities back to the database.
    //BUGFIX: the condition read "delta_t <= 10-7" (i.e. <= 3), which made the
    //write-back effectively unconditional.
    if (delta_t <= 1e-7)
        mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());

    return delta_t;
    KRATOS_CATCH("")
}
// Fill the linear (A) and non-linear (B) Darcy resistance coefficients on all
// nodes, either via the Ergun correlation (res_law == 1) or - for a custom or
// absent resistance law - by zeroing them on non-porous nodes, and mirror the
// result into the edge-based arrays mA / mB.
void CalculatePorousResistanceLaw(unsigned int res_law)
{
    if (res_law == 1)
    {
        /* Ergun law: derive A and B from porosity, particle diameter and viscosity. */
        for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
             inode != mr_model_part.NodesEnd(); inode++)
        {
            const double eps = inode->FastGetSolutionStepValue(POROSITY);
            const double d = inode->FastGetSolutionStepValue(DIAMETER);
            const double nu = inode->FastGetSolutionStepValue(VISCOSITY);
            double& a = inode->FastGetSolutionStepValue(LIN_DARCY_COEF);
            double& b = inode->FastGetSolutionStepValue(NONLIN_DARCY_COEF);
            if (eps < 1.0)
            {
                /* inverse permeability following Ergun's correlation */
                const double k_inv = 150.0 * (1.0 - eps) * (1.0 - eps) / (eps * eps * eps * d * d);
                a = nu * k_inv;
                b = (1.75 / eps) * sqrt(k_inv / (150.0 * eps));
            }
            else
            {
                /* pure fluid node: no porous resistance */
                a = 0.0;
                b = 0.0;
            }
        }
    }
    else
    {
        /* custom or no resistance law: ensure non-porous nodes carry zero coefficients */
        for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
             inode != mr_model_part.NodesEnd(); inode++)
        {
            const double eps = inode->FastGetSolutionStepValue(POROSITY);
            double& a = inode->FastGetSolutionStepValue(LIN_DARCY_COEF);
            double& b = inode->FastGetSolutionStepValue(NONLIN_DARCY_COEF);
            if (eps == 1.0)
            {
                a = 0.0;
                b = 0.0;
            }
        }
    }
    /* mirror the updated coefficients into the edge-based database */
    mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes());
}
private:
double mMolecularViscosity;
//edge-based data structure (CSR adjacency, edge contributions, lumped masses)
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;
bool muse_mass_correction;
//parameters controlling the wall law
bool mWallLawIsActive;
double mY_wall;
//parameters for controlling the usage of the delta time in the stabilization
double mstabdt_pressure_factor;
double mstabdt_convection_factor;
double medge_detection_angle;
double mtau2_factor;
bool massume_constant_dp;
//nodal values
ValuesVectorType mViscosity;
//velocity vector U at time steps n and n+1 (mWork is working storage)
CalcVectorType mWork, mvel_n, mvel_n1, mx;
//pressure vector p at time steps n and n+1
ValuesVectorType mPn, mPn1;
//nodal values of the level-set (distance) function
ValuesVectorType mdistances;
//minimum length of the edges surrounding each nodal point
ValuesVectorType mHmin;
//average length of the edges surrounding each nodal point
ValuesVectorType mHavg;
CalcVectorType mEdgeDimensions;
//area normals (slip walls and inlet/outlet boundaries)
CalcVectorType mSlipNormal;
CalcVectorType mInOutNormal;
//projection terms
CalcVectorType mPi, mXi;
//flag for first time step
bool mFirstStep;
//flag to differentiate interior and boundary nodes
ValuesVectorType mNodalFlag;
//lists of nodes with different types of boundary conditions
IndicesVectorType mSlipBoundaryList, mPressureOutletList, mFixedVelocities, mInOutBoundaryList;
CalcVectorType mFixedVelocitiesValues;
// ValuesVectorType mPressureOutlet;
//intrinsic time step size
ValuesVectorType mTauPressure;
ValuesVectorType mTauConvection;
ValuesVectorType mTau2;
ValuesVectorType mdiv_error;
//scratch flags marking slip (and, reused, inlet/outlet) nodes - see CalculateNormals
std::vector<bool> mis_slip;
//variables for resolving pressure equation
//laplacian matrix
TSystemMatrixType mL;
//constant variables
double mRho;
//double mViscosity;
array_1d<double, TDim> mBodyForce;
//variables for convection
ValuesVectorType mphi_n;
ValuesVectorType mphi_n1;
CalcVectorType mPiConvection;
//per-node limiter coefficient in [0,1] - see ConvectDistance
ValuesVectorType mBeta;
//variables for edge BCs
IndicesVectorType medge_nodes;
CalcVectorType medge_nodes_direction;
IndicesVectorType mcorner_nodes;
//nodal porosity (filled from POROSITY)
ValuesVectorType mEps;
ValuesVectorType mdiag_stiffness;
// ValuesVectorType mD;
//linear and non-linear Darcy coefficients (LIN_DARCY_COEF / NONLIN_DARCY_COEF)
ValuesVectorType mA;
ValuesVectorType mB;
//structure velocity (filled from STRUCTURE_VELOCITY)
CalcVectorType mStrVel;
//minimum time step based on the average edge length - set in ComputeBoundedTimeStep
double mdelta_t_avg;
double max_dt;
double mshock_coeff;
//***********************************************************
//functions to calculate area normals for boundary conditions
// Computes the (non-normalized) edge normal of a 2D boundary condition:
// the segment (p0,p1) gets normal (dy, -dx, 0), i.e. the edge vector rotated
// by -90 degrees. The result is returned in area_normal and also stored in
// the condition's NORMAL variable.
void CalculateNormal2D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal)
{
    Geometry<Node < 3 > >& edge_geom = cond_it->GetGeometry();
    const double dx = edge_geom[1].X() - edge_geom[0].X();
    const double dy = edge_geom[1].Y() - edge_geom[0].Y();
    area_normal[0] = dy;
    area_normal[1] = -dx;
    area_normal[2] = 0.0;
    noalias(cond_it->GetValue(NORMAL)) = area_normal;
}
// Computes the area-weighted normal of a triangular boundary face as
// -0.5 * (p1 - p0) x (p2 - p0) and stores it in the condition's NORMAL
// variable. v1 and v2 are caller-provided scratch vectors (avoids per-face
// temporaries inside the face loop).
void CalculateNormal3D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal, array_1d<double, 3 > & v1, array_1d<double, 3 > & v2)
{
    Geometry<Node < 3 > >& face_geom = cond_it->GetGeometry();
    // edge vectors spanning the triangle
    v1[0] = face_geom[1].X() - face_geom[0].X();
    v1[1] = face_geom[1].Y() - face_geom[0].Y();
    v1[2] = face_geom[1].Z() - face_geom[0].Z();
    v2[0] = face_geom[2].X() - face_geom[0].X();
    v2[1] = face_geom[2].Y() - face_geom[0].Y();
    v2[2] = face_geom[2].Z() - face_geom[0].Z();
    // cross product gives twice the (signed) triangle area vector
    MathUtils<double>::CrossProduct(area_normal, v1, v2);
    area_normal *= -0.5;
    noalias(cond_it->GetValue(NORMAL)) = area_normal;
}
//*********************************************************
//function to calculate minimum length of surrounding edges
// Fills three nodal/edge geometry arrays used by the stabilization terms:
// - mHmin: minimum surrounding-edge length, copied from the matrix container
// - mHavg: average mesh size recovered from the lumped mass (h^2/2 in 2D,
//   h^3/6 in 3D, inverted below)
// - mEdgeDimensions: coordinate difference pos_i - pos_j for every CSR edge
void CalculateEdgeLengths(ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY
    //get number of nodes
    unsigned int n_nodes = rNodes.size();
    //reserve memory for storage of nodal coordinates
    std::vector< array_1d<double, 3 > > position;
    position.resize(n_nodes);
    //get position of all nodes
    for (typename ModelPart::NodesContainerType::iterator node_it = rNodes.begin(); node_it != rNodes.end(); node_it++)
    {
        //get the global index of the node
        unsigned int i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));
        //save its coordinates locally
        noalias(position[i_node]) = node_it->Coordinates();
        //initialize minimum edge length with relatively big values
        mHmin[i_node] = 1e10;
    }
    // overwrite the initial guess with the precomputed h_min of the container
    ValuesVectorType& aaa = mr_matrix_container.GetHmin();
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        mHmin[i_node] = aaa[i_node];
    }
    //take unstructured meshes into account
    // recover an average nodal length scale from the lumped mass
    if (TDim == 2)
    {
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            double& h_i = mHavg[i_node];
            double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
            // double& rho_i = mRho[i_node];
            h_i = sqrt(2.0 * m_i);
        }
    }
    else if (TDim == 3)
    {
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            double& h_i = mHavg[i_node];
            double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
            // double& rho_i = mRho[i_node];
            h_i = pow(6.0 * m_i, 1.0 / 3.0);
        }
    }
    //compute edge coordinates
    // one entry per CSR edge: l_k = pos_i - pos_j
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, 3 > & pos_i = position[i_node];
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            array_1d<double, 3 > & pos_j = position[j_neighbour];
            array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
            for (unsigned int comp = 0; comp < TDim; comp++)
                l_k[comp] = pos_i[comp] - pos_j[comp];
        }
    }
    KRATOS_CATCH("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
// Assembles the edge-based right-hand side of the scalar convection equation
// (used for the level-set / distance function phi):
//   rhs_i = -sum_j [ convective term + tau * (stab_high - stab_low)
//                    + limited shock-capturing term ]
// - mphi: nodal values being convected
// - convective_velocity: nodal convection velocity; divided by the porosity
//   mEps before use
// - rhs: output, fully overwritten
// - active_nodes: nodes whose entry is 0.0 are skipped (rhs set to 0)
// The convective projection mPiConvection is assumed to be already computed
// elsewhere (the in-place computation that used to live here was dead,
// commented-out code and has been removed).
void CalculateRHS_convection(
    const ValuesVectorType& mphi,
    const CalcVectorType& convective_velocity,
    ValuesVectorType& rhs,
    ValuesVectorType& active_nodes
)
{
    KRATOS_TRY
    int n_nodes = mphi.size();
    //calculating the RHS
    double stab_low;
    double stab_high;
    array_1d<double, TDim> a_i;
    array_1d<double, TDim> a_j;
    #pragma omp parallel for private(stab_low,stab_high,a_i,a_j)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double& rhs_i = rhs[i_node];
        const double& h_i = mHavg[i_node];
        const double& phi_i = mphi[i_node];
        // effective convection velocity = physical velocity / porosity
        noalias(a_i) = convective_velocity[i_node];
        a_i /= mEps[i_node];
        // project the nodal convective projection onto a_i
        const array_1d<double, TDim>& proj_i = mPiConvection[i_node];
        double pi_i = proj_i[0] * a_i[0];
        for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
            pi_i += proj_i[l_comp] * a_i[l_comp];
        rhs_i = 0.0;
        if (active_nodes[i_node] != 0.0)
        {
            const double& beta = mBeta[i_node];
            double norm_a = a_i[0] * a_i[0];
            for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
                norm_a += a_i[l_comp] * a_i[l_comp];
            norm_a = sqrt(norm_a);
            //loop to all the edges surrounding node I
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                if (active_nodes[j_neighbour] != 0.0)
                {
                    const double& phi_j = mphi[j_neighbour];
                    noalias(a_j) = convective_velocity[j_neighbour];
                    a_j /= mEps[j_neighbour];
                    const array_1d<double, TDim>& proj_j = mPiConvection[j_neighbour];
                    // NOTE(review): the neighbour projection is contracted with
                    // a_i (not a_j); looks intentional but confirm against the
                    // stabilization derivation
                    double pi_j = proj_j[0] * a_i[0];
                    for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
                        pi_j += proj_j[l_comp] * a_i[l_comp];
                    CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
                    //convection operator
                    edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, phi_i, a_j, phi_j);
                    //calculate stabilization part
                    edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, phi_i, a_j, phi_j);
                    double edge_tau = mTauConvection[i_node];
                    edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
                    edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
                    // shock capturing: isotropic Laplacian minus its streamline
                    // component, scaled by beta*|a|*h and the user coefficient
                    double coeff = 0.5 * mshock_coeff; //=0.7*0.5;
                    double laplacian_ij = 0.0;
                    edge_ij.CalculateScalarLaplacian(laplacian_ij);
                    double capturing = laplacian_ij * (phi_j - phi_i);
                    double aaa = 0.0;
                    for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
                        for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
                            aaa += a_i[k_comp] * a_i[m_comp] * edge_ij.LaplacianIJ(k_comp, m_comp);
                    if (norm_a > 1e-10)
                    {
                        aaa /= (norm_a * norm_a);
                        double capturing2 = aaa * (phi_j - phi_i);
                        if (fabs(capturing) > fabs(capturing2))
                            rhs_i -= coeff * (capturing - capturing2) * beta * norm_a * h_i;
                    }
                }
            }
        }
    }
    KRATOS_CATCH("")
}
//**************************************
// Decides whether the edge (i1,i2) shared by a slip face and its neighbour
// `neighb_index` is a geometric feature edge: when the angle between the two
// face normals exceeds the threshold (~45 deg), both end nodes have their
// feature-edge counter incremented and the edge direction (sign-aligned with
// what was already accumulated) is added into cornern_list. The work is done
// only from the face where Id(i1) < Id(i2) so each edge is processed once.
void CornerDectectionHelper(Geometry< Node < 3 > >& face_geometry,
    const array_1d<double, 3 > & face_normal,
    const double An,
    const GlobalPointersVector<Condition>& neighb,
    const unsigned int i1,
    const unsigned int i2,
    const unsigned int neighb_index,
    std::vector<unsigned int>& edge_nodes,
    CalcVectorType& cornern_list
)
{
    // NOTE(review): 3.1 is a rough value of pi, so the effective threshold is
    // about 44.4 deg rather than exactly 45 -- confirm intended
    double acceptable_angle = 45.0 / 180.0 * 3.1; //angles of less than 45 deg will be accepted
    double acceptable_cos = cos(acceptable_angle);
    if (face_geometry[i1].Id() < face_geometry[i2].Id()) //we do this to add the face ones
    {
        const array_1d<double, 3 > & neighb_normal = neighb[neighb_index].GetValue(NORMAL);
        double neighb_An = norm_2(neighb_normal);
        // cosine of the angle between the two (non-unit) face normals
        double cos_normal = 1.0 / (An * neighb_An) * inner_prod(face_normal, neighb_normal);
        //if the angle is too big between the two normals then the edge in the middle is a corner
        if (cos_normal < acceptable_cos)
        {
            // unit vector along the shared edge
            array_1d<double, 3 > edge = face_geometry[i2].Coordinates() - face_geometry[i1].Coordinates();
            double temp = norm_2(edge);
            edge /= temp;
            int index1 = face_geometry[i1].FastGetSolutionStepValue(AUX_INDEX);
            int index2 = face_geometry[i2].FastGetSolutionStepValue(AUX_INDEX);
            edge_nodes[index1] += 1;
            edge_nodes[index2] += 1;
            // accumulate the edge direction with a sign that keeps it aligned
            // with what has already been accumulated for this node
            // double sign1 = inner_prod(cornern_list[index1], edge);
            double sign1 = 0.0;
            for(unsigned int i = 0 ; i < edge.size() ; i++)
            {sign1 += cornern_list[index1][i]*edge[i];}
            if (sign1 >= 0)
            { for(unsigned int i = 0 ; i < edge.size() ; i++)
                cornern_list[index1][i] += edge[i];
            }
            else
            { for(unsigned int i = 0 ; i < edge.size() ; i++)
                cornern_list[index1][i] -= edge[i];
            }
            double sign2 = inner_prod(cornern_list[index2], edge);
            if (sign2 >= 0)
            { for(unsigned int i = 0 ; i < edge.size() ; i++)
                cornern_list[index2][i] += edge[i];
            }
            else
            { for(unsigned int i = 0 ; i < edge.size() ; i++)
                cornern_list[index2][i] -= edge[i];
            }
        }
    }
}
//function to calculate the area normals
// Detects feature edges and corner nodes of the slip boundary in 3D.
// For every slip face (IS_STRUCTURE == 1) each shared edge with a neighbour
// face is tested via CornerDectectionHelper; per node the number of incident
// feature edges is counted: exactly 2 -> edge node (index and averaged edge
// direction stored), more than 2 -> corner node. Results are written to
// medge_nodes, medge_nodes_direction and mcorner_nodes.
void DetectEdges3D(ModelPart::ConditionsContainerType& rConditions)
{
    KRATOS_TRY
    //calculate area normals face-by-face
    array_1d<double, 3 > area_normal;
    //(re)initialize normals
    unsigned int n_nodes = mNodalFlag.size();
    std::vector<unsigned int> temp_edge_nodes(n_nodes);
    CalcVectorType temp_cornern_list(n_nodes);
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        temp_edge_nodes[i_node] = 0.0;
        noalias(temp_cornern_list[i_node]) = ZeroVector(TDim);
    }
    //loop over all faces
    // const double node_factor = 1.0 / TDim;
    for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
    {
        //get geometry data of the face
        Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
        //reference for area normal of the face
        const array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
        double An = norm_2(face_normal);
        unsigned int current_id = cond_it->Id();
        //slip condition
        if (cond_it->GetValue(IS_STRUCTURE) == 1.0) //this is a slip face --> now look for its neighbours
        {
            const GlobalPointersVector<Condition>& neighb = cond_it->GetValue(NEIGHBOUR_CONDITIONS);
            // a neighbour slot holding the face's own id means "no neighbour"
            //check for neighbour zero
            if (neighb[0].Id() != current_id) //check if the neighbour exists
                CornerDectectionHelper(face_geometry, face_normal, An, neighb, 1, 2, 0, temp_edge_nodes, temp_cornern_list);
            //check for neighbour one
            if (neighb[1].Id() != current_id) //check if the neighbour exists
                CornerDectectionHelper(face_geometry, face_normal, An, neighb, 2, 0, 1, temp_edge_nodes, temp_cornern_list);
            //check for neighbour two
            if (neighb[2].Id() != current_id) //check if the neighbour exists
                CornerDectectionHelper(face_geometry, face_normal, An, neighb, 0, 1, 2, temp_edge_nodes, temp_cornern_list);
        }
    }
    // ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    // mr_matrix_container.WriteVectorToDatabase(ACCELERATION, temp_cornern_list, rNodes);
    //fill the list of edge_nodes
    std::vector<unsigned int> tempmedge_nodes;
    std::vector< array_1d<double,TDim> > tempmedge_nodes_direction;
    std::vector<unsigned int> tempmcorner_nodes;
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        if (temp_edge_nodes[i_node] == 2) //node is a edge_node
        {
            tempmedge_nodes.push_back(i_node);
            array_1d<double, TDim>& node_edge = temp_cornern_list[i_node];
            node_edge /= norm_2(node_edge);
            tempmedge_nodes_direction.push_back(node_edge);
        }
        else if (temp_edge_nodes[i_node] > 2)
            tempmcorner_nodes.push_back(i_node);
    }
    // copy the temporary std::vectors into the member containers
    medge_nodes.resize(tempmedge_nodes.size(),false);
    medge_nodes_direction.resize(tempmedge_nodes_direction.size(),false);
    mcorner_nodes.resize(tempmcorner_nodes.size(),false);
    #pragma omp parallel for
    for ( int i = 0; i < static_cast<int>(tempmedge_nodes.size()); i++)
    {
        medge_nodes[i] = tempmedge_nodes[i];
        medge_nodes_direction[i] = tempmedge_nodes_direction[i];
    }
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(tempmcorner_nodes.size()); i++)
    {
        mcorner_nodes[i] = tempmcorner_nodes[i];
    }
    // debug output of the detected corner nodes
    for (int i = 0; i < static_cast<int>(mcorner_nodes.size()); i++)
    {
        KRATOS_WATCH(mcorner_nodes[i]);
    }
    KRATOS_CATCH("")
}
// double ComputePorosityCoefficient(const double& viscosity, const double& vel_norm, const double& eps, const double& d)
// {
// // const double d = 0.01; //to be changed
// double linear;
// double non_linear;
// if (eps < 1.0)
// {
// double k_inv = 150.0 * (1.0 - eps)*(1.0 - eps) / (eps * eps * eps * d * d);
// linear = eps * viscosity * k_inv; // eps * Ai
// non_linear = (1.75 * vel_norm) * sqrt(k_inv / (150.0 * eps)); //eps * Bi * vel_norm
// // double linear = viscosity * k_inv;
// // double non_linear = (1.75 * vel_norm / eps) * sqrt(k_inv / (150.0 * eps));
// } else
// {
// linear = 0.0;
// non_linear = 0.0;
// }
// return linear + non_linear;
// }
// Returns the total Darcy drag coefficient for a node:
//   eps * a           (linear / viscous part)
// + eps * b * |v|     (non-linear / inertial part)
// a and b are assumed to have been zeroed beforehand for non-porous nodes
// (eps == 1), so no porosity check is repeated here.
double ComputePorosityCoefficient(const double& vel_norm, const double& eps, const double& a, const double& b)
{
    const double linear_term = eps * a;
    const double nonlinear_term = eps * b * vel_norm;
    return linear_term + nonlinear_term;
}
// double ComputeStructureContributionToPorosityCoefficient(const double& fluid_vel, const double& str_vel, const double& str_vel_norm, const double& eps, const double& a, const double& b)
// {
//
//
// }
// One Jacobi-style Laplacian smoothing pass on a nodal scalar field.
// For interior nodes (level-set distance <= 0) the value is replaced by
// origin_i - sum_j L_ij * (origin_j - origin_i); nodes outside the domain
// keep their value. `aux` is scratch storage so the whole update reads only
// old values before the final copy-back.
void LaplacianSmooth(ValuesVectorType& to_be_smoothed, ValuesVectorType& aux)
{
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double dist = mdistances[i_node];
        double correction = 0.0;
        const double& origin_i = to_be_smoothed[i_node];
        if (dist <= 0.0) //node is inside domain ---- if outside do nothing
        {
            // accumulate the edge Laplacian applied to the old field
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                const double& origin_j = to_be_smoothed[j_neighbour];
                CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
                double l_ikjk;
                edge_ij.CalculateScalarLaplacian(l_ikjk);
                correction += l_ikjk * (origin_j - origin_i);
            }
        }
        aux[i_node] = origin_i - correction;
    }
    // copy the smoothed values back (separate loop keeps the pass Jacobi-like)
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
        to_be_smoothed[i_node] = aux[i_node];
}
// Adds a wall-law (log law of the wall) friction contribution to the
// diagonal stiffness of every slip-boundary node lying inside the fluid
// (level-set distance <= 0). When y+ exceeds the sublayer/log-layer
// intercept, a Newton iteration refines the friction velocity u_tau.
// NOTE(review): the refined mod_uthaw is currently not fed back into
// diag_stiffness (the rhs-based use is commented out) -- confirm intended.
void ComputeWallResistance(
    const CalcVectorType& vel,
    ValuesVectorType& diag_stiffness
    // CalcVectorType& rhs
)
{
    //parameters:
    double k = 0.41;  // von Karman constant
    double B = 5.1;   // log-law additive constant
    double toll = 1e-6;  // Newton tolerance, relative to u_tau
    double ym = mY_wall; //0.0825877; //0.0093823
    double y_plus_incercept = 10.9931899;  // y+ where linear and log profiles intersect
    unsigned int itmax = 100;
    // the wall law divides by the viscosity, so a zero value is an input error
    if (mViscosity[0] == 0)
        KRATOS_THROW_ERROR(std::logic_error, "it is not possible to use the wall law with 0 viscosity", "");
    //slip condition
    int slip_size = mSlipBoundaryList.size();
    #pragma omp parallel for firstprivate(slip_size,B,toll,ym,y_plus_incercept,itmax)
    for (int i_slip = 0; i_slip < slip_size; i_slip++)
    {
        unsigned int i_node = mSlipBoundaryList[i_slip];
        double dist = mdistances[i_node];
        const double nu = mViscosity[i_node];
        if (dist <= 0.0)
        {
            //array_1d<double, TDim>& rhs_i = rhs[i_node];
            const array_1d<double, TDim>& U_i = vel[i_node];
            const array_1d<double, TDim>& an_i = mSlipNormal[i_node];
            //compute the modulus of the velocity
            // an_i is an area-weighted normal, so |an_i| is the boundary area
            double mod_vel = 0.0;
            double area = 0.0;
            for (unsigned int comp = 0; comp < TDim; comp++)
            {
                mod_vel += U_i[comp] * U_i[comp];
                area += an_i[comp] * an_i[comp];
            }
            mod_vel = sqrt(mod_vel);
            area = sqrt(area);
            // drag coefficient from the log law evaluated at y+ = 100
            diag_stiffness[i_node] += area * mod_vel /pow(1.0/k*log(100.00) + B,2);/* * mWallReductionFactor[ i_node ];*/
            //now compute the skin friction
            // initial guess from the viscous sublayer: u_tau = sqrt(|u| nu / y)
            double mod_uthaw = sqrt(mod_vel * nu / ym);
            const double y_plus = ym * mod_uthaw / nu;
            if (y_plus > y_plus_incercept)
            {
                //begin cycle to calculate the real u_tau's module:
                unsigned int it = 0;
                double dx = 1e10;
                // KRATOS_WATCH(fabs(dx));
                // Newton iteration on f(u_tau) = u_tau*(log(y u_tau/nu)/k + B) - |u|
                while (fabs(dx) > toll * mod_uthaw && it < itmax)
                {
                    double a = 1.0 / k;
                    double temp = a * log(ym * mod_uthaw / nu) + B;
                    double y = mod_uthaw * (temp) - mod_vel;
                    double y1 = temp + a;
                    dx = y / y1;
                    mod_uthaw -= dx;
                    it = it + 1;
                }
                if (it == itmax)
                    std::cout << "attention max number of iterations exceeded in wall law computation" << std::endl;
            }
            // else
            // {
            // for (unsigned int comp = 0; comp < TDim; comp++)
            // rhs_i[comp] -= U_i[comp] * area * mu / (density*ym) ;
            // }
            /* if (mod_vel > 1e-12)
            for (unsigned int comp = 0; comp < TDim; comp++)
            rhs_i[comp] -= U_i[comp] * area * mod_uthaw * mod_uthaw / (mod_vel);
            */
        }
        else
            diag_stiffness[i_node] += 0.0; // outside the fluid: no contribution
    }
}
// Computes a Smagorinsky-type eddy viscosity in 3D: nodal velocity gradients
// are assembled edge-by-edge, symmetrized (2*S), and the turbulent viscosity
// nu_t = Cs*h*h*sqrt(0.5 * sum(grad^2)) is added to the molecular one; the
// result is stored in mViscosity and written to the VISCOSITY nodal variable.
// NOTE(review): Cs enters linearly (Cs*h*h), not as the usual (Cs*h)^2 --
// confirm against the intended Smagorinsky constant definition.
void ApplySmagorinsky3D (double MolecularViscosity, double Cs)
{
    KRATOS_TRY
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    //calculating the RHS
    array_1d<double, TDim> grad_vx;
    array_1d<double, TDim> grad_vy;
    array_1d<double, TDim> grad_vz;
    int n_nodes = rNodes.size();
    mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);
    array_1d<double, TDim> stab_high;
    #pragma omp parallel for private(grad_vx,grad_vy,grad_vz)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        //set to zero the gradients
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            grad_vx[comp] = 0.0 ;
            grad_vy[comp] = 0.0 ;
            grad_vz[comp] = 0.0 ;
        }
        //compute node by node the gradients
        const array_1d<double, TDim>& U_i = mvel_n1[i_node];
        const double h = mHmin[i_node];
        const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
            const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
            CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
            edge_ij.Add_grad_p (grad_vx, U_i[0], U_j[0]);
            edge_ij.Add_grad_p (grad_vy, U_i[1], U_j[1]);
            edge_ij.Add_grad_p (grad_vz, U_i[2], U_j[2]);
        }
        //finalize computation of the gradients
        // apply the inverse lumped mass to recover nodal gradient values
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            grad_vx[comp] *= m_inv ;
            grad_vy[comp] *= m_inv ;
            grad_vz[comp] *= m_inv ;
        }
        //symmetrize and multiply by 2
        grad_vx[0] *= 2.0;
        grad_vy[1] *= 2.0;
        grad_vz[2] *= 2.0;
        grad_vx[1] += grad_vy[0];
        grad_vx[2] += grad_vz[0];
        grad_vy[2] += grad_vz[1];
        grad_vy[0] += grad_vx[1];
        grad_vz[0] += grad_vx[2];
        grad_vz[1] += grad_vy[2];
        //compute smagorinsky term
        double aux = 0.0;
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            aux += grad_vx[comp] * grad_vx[comp] ;
            aux += grad_vy[comp] * grad_vy[comp] ;
            aux += grad_vz[comp] * grad_vz[comp] ;
        }
        aux *= 0.5;
        if (aux < 0.0 ) aux=0.0; // guard against round-off
        double turbulent_viscosity = Cs*h*h*sqrt (aux) /**MolecularViscosity*/;
        mViscosity[i_node] = turbulent_viscosity + MolecularViscosity;
    }
    mr_matrix_container.WriteScalarToDatabase (VISCOSITY, mViscosity, rNodes);
    KRATOS_CATCH ("");
}
// 2D counterpart of ApplySmagorinsky3D: assembles the in-plane velocity
// gradients edge-by-edge, symmetrizes them, and stores
// nu_t + MolecularViscosity in mViscosity / the VISCOSITY nodal variable.
// NOTE(review): Cs enters linearly (Cs*h*h), not (Cs*h)^2 -- confirm.
void ApplySmagorinsky2D (double MolecularViscosity, double Cs)
{
    KRATOS_TRY
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    //calculating the RHS
    array_1d<double, TDim> grad_vx;
    array_1d<double, TDim> grad_vy;
    // array_1d<double, TDim> grad_vz;
    int n_nodes = rNodes.size();
    mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);
    array_1d<double, TDim> stab_high;
    #pragma omp parallel for private(grad_vx,grad_vy)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        //set to zero the gradients
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            grad_vx[comp] = 0.0 ;
            grad_vy[comp] = 0.0 ;
            // grad_vz[comp] = 0.0 ;
        }
        //compute node by node the gradients
        const array_1d<double, TDim>& U_i = mvel_n1[i_node];
        const double h = mHmin[i_node];
        const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
            const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
            CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
            edge_ij.Add_grad_p (grad_vx, U_i[0], U_j[0]);
            edge_ij.Add_grad_p (grad_vy, U_i[1], U_j[1]);
        }
        //finalize computation of the gradients
        // apply the inverse lumped mass to recover nodal gradient values
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            grad_vx[comp] *= m_inv ;
            grad_vy[comp] *= m_inv ;
        }
        //symmetrize and multiply by 2
        grad_vx[0] *= 2.0;
        grad_vy[1] *= 2.0;
        grad_vx[1] += grad_vy[0];
        grad_vy[0] += grad_vx[1];
        //compute smagorinsky term
        double aux = 0.0;
        for (unsigned int comp = 0; comp < TDim; comp++)
        {
            aux += grad_vx[comp] * grad_vx[comp] ;
            aux += grad_vy[comp] * grad_vy[comp] ;
        }
        aux *= 0.5;
        if (aux < 0.0 ) aux=0.0; // guard against round-off
        double turbulent_viscosity = Cs*h*h*sqrt (aux) /**MolecularViscosity*/;
        mViscosity[i_node] = turbulent_viscosity + MolecularViscosity;
    }
    mr_matrix_container.WriteScalarToDatabase (VISCOSITY, mViscosity, rNodes);
    KRATOS_CATCH ("");
}
// Applies the inverse of the "effective" lumped operator (M + dt*D) to a
// combined right-hand side, component by component:
//   dest = value/(m + value*d) * ( m/value * origin1 + origin )
//        = (m*origin1 + value*origin) / (m + value*d)
// where value plays the role of the time step, m the lumped mass and d the
// diagonal stiffness (e.g. wall-law / Darcy drag).
// NOTE(review): the division m/value assumes a non-zero `value` -- confirm
// callers never pass 0.
void Add_Effective_Inverse_Multiply (
    CalcVectorType& destination,
    const CalcVectorType& origin1,
    const double value,
    const ValuesVectorType& mass,
    const ValuesVectorType& diag_stiffness,
    const CalcVectorType& origin
)
{
    KRATOS_TRY
    int loop_size = destination.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        array_1d<double, TDim>& dest = destination[i_node];
        const double m = mass[i_node];
        const double d = diag_stiffness[i_node];
        const array_1d<double, TDim>& origin_vec1 = origin1[i_node];
        const array_1d<double, TDim>& origin_value = origin[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            dest[comp] = value / (m + value*d) * ( m/value * origin_vec1[comp] + origin_value[comp] );
    }
    KRATOS_CATCH ("")
}
};
} //namespace Kratos
#undef SYMM_PRESS
#endif //KRATOS_EDGEBASED_LEVELSET_FLUID_SOLVER_H_INCLUDED defined
|
panama_fmt_plug.c | /* Panama cracker patch for JtR. Hacked together during May of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_panama_;
#elif FMT_REGISTERS_H
john_register_one(&fmt_panama_);
#else
#include <string.h>
#include "arch.h"
#include "sph_panama.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
// 1 - 217k
// 64 - 1930k
// 128 - 2099k
// 256 - 2204k *** set to this level
// 512 - 2203k
// 1k - 2124k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 8
#else
#define OMP_SCALE 256
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"
#define FORMAT_LABEL "Panama"
#define FORMAT_NAME ""
#define FORMAT_TAG "$panama$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "Panama 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 32
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-tests: each digest appears both raw and with the "$panama$" tag. */
static struct fmt_tests panama__tests[] = {
	{"049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
	{"$panama$049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
	{"a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
	{"$panama$a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
	{"017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
	{"$panama$017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
	{"3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
	{"$panama$3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
	{NULL}
};
/* Per-candidate plaintext buffers (NUL-terminated), sized in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Per-candidate 256-bit Panama digests, filled by crypt_all(). */
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/*
 * init: allocate the per-candidate key and digest buffers. Under OpenMP the
 * keys-per-crypt counts are scaled by thread count times OMP_SCALE so every
 * thread has enough work per crypt_all() call.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
/* done: release the buffers allocated in init() (reverse order). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/*
 * valid: accept an optional "$panama$" tag followed by exactly
 * BINARY_SIZE*2 (64) hex digits; hexlenl() reports any trailing
 * non-hex characters via `extra`.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int extra;
	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (hexlenl(p, &extra) != BINARY_SIZE*2 || extra)
		return 0;
	return 1;
}
/*
 * split: canonicalize a ciphertext by always prepending the "$panama$"
 * tag. Returns a pointer to static storage.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + BINARY_SIZE * 2 + 1];
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE * 2 + 1);
	return out;
}
/*
 * get_binary: decode the 64 hex digits into the 32-byte raw digest.
 * The tag is always present here because split() canonicalizes the
 * ciphertext first. Returns a pointer to static, word-aligned storage.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;	/* forces word alignment for fast compares */
	} buf;
	unsigned char *out = buf.c;
	char *p = ciphertext + TAG_LENGTH;
	int i;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
		    (atoi16[ARCH_INDEX(*p)] << 4) |
		    atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
/* Hash-table lookups: low bits of the first 32-bit word of each digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * crypt_all: hash every queued plaintext with Panama into crypt_out.
 * With OpenMP the loop runs over all candidates in parallel; in non-OMP
 * builds the braces form a plain block and only index 0 is computed
 * (max_keys_per_crypt is 1 in that configuration).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_panama_context ctx;
		sph_panama_init(&ctx);
		sph_panama(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_panama_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}
/*
 * cmp_all: fast scan comparing only the first ARCH_SIZE bytes of each
 * computed digest against the target; cmp_one() performs the full
 * 32-byte comparison afterwards. Without OpenMP there is exactly one
 * key per crypt, so the loop is compiled out.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* cmp_one: full 32-byte digest comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_exact: nothing beyond cmp_one is needed for an unsalted raw hash. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* panama_set_key: store a candidate, truncated to PLAINTEXT_LENGTH. */
static void panama_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}
/* get_key: return the stored (possibly truncated) plaintext. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor wiring the callbacks above into JtR's format interface. */
struct fmt_main fmt_panama_ = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		panama__tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,	/* unsalted format: default salt handlers */
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		panama_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
mmult.c | #include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#define NRA 2048 /* number of rows in matrix A */
#define NCA 2048 /* number of columns in matrix A */
#define NCB 2048 /* number of columns in matrix B */
struct timeval startTime;
struct timeval finishTime;
double timeIntervalLength;
__sw_global__ double **a; /* [NRA][NCA] */
__sw_global__ double **b; /* [NCA][NCB] */
__sw_global__ double **c; /* [NRA][NCB] */
__sw_global__ double sum;
/*
 * Allocate `size` bytes or terminate the program.
 * `info` is an integer tag printed in the error message so the failing
 * call site can be identified. Never returns NULL.
 */
void* myMalloc(int size, int info)
{
    void* t = malloc(size);   /* no cast needed in C */
    if(!t)
    {
        printf("\nMemory allocation error [%d]",info);
        fflush(stdout);
        /* was exit(0): a failed allocation must not report success to the shell */
        exit(EXIT_FAILURE);
    }
    return t;
}
/*
 * Naive dense matrix-multiply benchmark: C = A * B with A (NRA x NCA),
 * B (NCA x NCB), C (NRA x NCB), timed with gettimeofday and parallelized
 * over rows of C with OpenMP.
 */
int main (int argc, char *argv[])
{
    __sw_global__ long i, j, k;
    sum = 0;
    /* Allocate row-pointer arrays, then one row at a time.
     * Fixed: the row-count loops previously used the wrong dimension macros
     * (NCA/NCB/NCB instead of NRA/NCA/NRA) -- harmless only because all
     * three dimensions happen to be 2048. */
    a = (double**)myMalloc(NRA*sizeof(double*),1);
    for (i=0;i<NRA;i++)
        a[i]=(double*)myMalloc(NCA*sizeof(double),2);
    b = (double**)myMalloc(NCA*sizeof(double*),3);
    for (i=0;i<NCA;i++)
        b[i]=(double*)myMalloc(NCB*sizeof(double),4);
    c = (double**)myMalloc(NRA*sizeof(double*),5);
    for (i=0;i<NRA;i++)
        c[i]=(double*)myMalloc(NCB*sizeof(double),6);
    /*** Initialize matrices ***/
    for(i = 0; i < NRA; i++)
        for(j = 0; j < NCA; j++)
            a[i][j] = i + j;
    for(i = 0; i < NCA; i++)
        for(j = 0; j < NCB; j++)
            b[i][j] = i * j;
    for(i = 0; i < NRA; i++)
        for(j = 0; j < NCB; j++)
            c[i][j] = 0;
    // Start timers
    gettimeofday(&startTime, NULL);
#pragma omp parallel private (i, j ,k)
    {
        /* Fixed: an `omp for` directive must be applied directly to the loop;
         * wrapping the loop nest in an extra brace block is not a valid
         * worksharing construct. */
#pragma omp for schedule (static, 8)
        for(i = 0; i < NRA; i++)
            for(j = 0; j < NCB; j++)
                for(k = 0; k < NCA; k++)
                    c[i][j] += a[i][k] * b[k][j];
    }
    // End timers
    gettimeofday(&finishTime, NULL);
    //Calculate the interval length in milliseconds
    timeIntervalLength = (double)(finishTime.tv_sec-startTime.tv_sec) * 1000000
                       + (double)(finishTime.tv_usec-startTime.tv_usec);
    timeIntervalLength=timeIntervalLength/1000;
    //Print the interval length
    printf("__aid_Time: %g msec.\n", timeIntervalLength);
    /*** Print results: checksum of all entries of C ***/
    for(i = 0; i < NRA; i++)
        for(j = 0; j < NCB; j++)
            sum += c[i][j];
    printf("__aid_Result: %g\n\n", sum);
    return 0;
}
|
paraloopend.h | /** \file
* \ingroup elbeem
*/
// same as grid loop_end + barrier
} // i
int i=0; //dummy
ADVANCE_POINTERS(2*gridLoopBound);
} // j
# if COMPRESSGRIDS==1
# if PARALLEL==1
//frintf(stderr," (id=%d k=%d) ",id,k);
#pragma omp barrier
# endif // PARALLEL==1
# else // COMPRESSGRIDS==1
int i=0; //dummy
ADVANCE_POINTERS(mLevel[lev].lSizex*2);
# endif // COMPRESSGRIDS==1
} // all cell loop k,j,i
#pragma omp critical
{
if(doReduce) {
// synchronize global vars
for(size_t j=0; j<calcListFull.size() ; j++) mListFull.push_back( calcListFull[j] );
for(size_t j=0; j<calcListEmpty.size(); j++) mListEmpty.push_back( calcListEmpty[j] );
for(size_t j=0; j<calcListParts.size(); j++) mpParticles->addFullParticle( calcListParts[j] );
if(calcMaxVlen>mMaxVlen) {
mMxvx = calcMxvx;
mMxvy = calcMxvy;
mMxvz = calcMxvz;
mMaxVlen = calcMaxVlen;
}
if(0) {debMsgStd("OMP_CRIT",DM_MSG, "reduce id"<<id<<" curr: "<<mMaxVlen<<"|"<<mMxvx<<","<<mMxvy<<","<<mMxvz<<
" calc[ "<<calcMaxVlen<<"|"<<calcMxvx<<","<<calcMxvy<<","<<calcMxvz<<"] " ,4 ); }
}
} // critical
} /* main_region */
//?lobOutstrForce = true;
|
par_amgdd_fac_cycle.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/**
 * Top-level AMG-DD FAC solve entry point: dispatches on the configured
 * FAC cycle type. Types 1 and 2 run the (recursive) V-/W-style cycle,
 * type 3 runs the F-cycle; any other value is reported, reset to 1, and
 * a V-cycle is performed.
 */
HYPRE_Int
hypre_BoomerAMGDD_FAC( void *amgdd_vdata, HYPRE_Int first_iteration )
{
   hypre_ParAMGDDData *amgdd_data  = (hypre_ParAMGDDData*) amgdd_vdata;
   HYPRE_Int           cycle_type  = hypre_ParAMGDDDataFACCycleType(amgdd_data);
   HYPRE_Int           start_level = hypre_ParAMGDDDataStartLevel(amgdd_data);

   switch (cycle_type)
   {
      case 1:
      case 2:
         hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, start_level, cycle_type, first_iteration);
         break;

      case 3:
         hypre_BoomerAMGDD_FAC_FCycle(amgdd_vdata, first_iteration);
         break;

      default:
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "WARNING: unknown AMG-DD FAC cycle type. Defaulting to 1 (V-cycle).\n");
         hypre_ParAMGDDDataFACCycleType(amgdd_data) = 1;
         hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, start_level, 1, first_iteration);
         break;
   }

   return hypre_error_flag;
}
// One recursive FAC cycle starting at `level`. cycle_type is the number of
// recursive coarse-level visits (1 = V-cycle, 2 = W-cycle). The relax calls
// pass a cycle_param of 1 (before restriction), 2 (after interpolation) and
// 3 (coarsest level) through to the user relaxation routine.
HYPRE_Int
hypre_BoomerAMGDD_FAC_Cycle( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_type,
HYPRE_Int first_iteration )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_ParAMGData *amg_data = hypre_ParAMGDDDataAMG(amgdd_data);
hypre_AMGDDCompGrid **compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data);
HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(amg_data);
HYPRE_Int i;
// Relax on the real nodes
hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, level, 1);
// Restrict the residual at all fine points (real and ghost) and set residual at coarse points not under the fine grid
if (num_levels > 1)
{
hypre_BoomerAMGDD_FAC_Restrict(compGrid[level], compGrid[level+1], first_iteration);
// Reset the update accumulators S and T on this level before recursing
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridS(compGrid[level]), 0.0);
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridT(compGrid[level]), 0.0);
// Either solve on the coarse level or recurse
if (level+1 == num_levels-1)
{
hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, num_levels-1, 3);
}
else for (i = 0; i < cycle_type; i++)
{
// W-cycle (cycle_type == 2) visits the coarse level twice; only the
// first visit may treat the residual as already current
hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, level+1, cycle_type, first_iteration);
first_iteration = 0;
}
// Interpolate up and relax
hypre_BoomerAMGDD_FAC_Interpolate(compGrid[level], compGrid[level+1]);
}
hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, level, 2);
return hypre_error_flag;
}
// Full (F-) cycle: restrict the problem down to the coarsest level, solve
// there, then work back to the finest level performing one V-cycle per level.
// On the very first iteration the residuals are assumed current, so the
// downward restriction sweep is skipped.
HYPRE_Int
hypre_BoomerAMGDD_FAC_FCycle( void *amgdd_vdata,
HYPRE_Int first_iteration )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_ParAMGData *amg_data = hypre_ParAMGDDDataAMG(amgdd_data);
hypre_AMGDDCompGrid **compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data);
HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(amg_data);
HYPRE_Int level;
// ... work down to coarsest ...
if (!first_iteration)
{
for (level = hypre_ParAMGDDDataStartLevel(amgdd_data); level < num_levels - 1; level++)
{
hypre_BoomerAMGDD_FAC_Restrict(compGrid[level], compGrid[level+1], 0);
// Clear the update accumulators S and T for the upcoming upward sweep
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridS(compGrid[level]), 0.0);
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridT(compGrid[level]), 0.0);
}
}
// ... solve on coarsest level ...
hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, num_levels-1, 3);
// ... and work back up to the finest
for (level = num_levels - 2; level > -1; level--)
{
// Interpolate up and relax
hypre_BoomerAMGDD_FAC_Interpolate(compGrid[level], compGrid[level+1]);
// V-cycle
hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, level, 1, 0);
}
return hypre_error_flag;
}
/**
 * FAC interpolation: u_f <- u_f + P_f * u_c, i.e. prolongate the
 * coarse-grid correction and add it to the fine-grid solution.
 */
HYPRE_Int
hypre_BoomerAMGDD_FAC_Interpolate( hypre_AMGDDCompGrid *compGrid_f,
hypre_AMGDDCompGrid *compGrid_c )
{
   hypre_AMGDDCompGridMatrix *P   = hypre_AMGDDCompGridP(compGrid_f);
   hypre_AMGDDCompGridVector *u_c = hypre_AMGDDCompGridU(compGrid_c);
   hypre_AMGDDCompGridVector *u_f = hypre_AMGDDCompGridU(compGrid_f);

   hypre_AMGDDCompGridMatvec(1.0, P, u_c, 1.0, u_f);

   return hypre_error_flag;
}
// FAC restriction from the fine comp grid to the coarse one. Recomputes the
// coarse residual (unless this is the first iteration, when it is assumed
// current), accumulates the fine-level update s_l <- s_l + A_l t_l, restricts
// it, subtracts it from the coarse RHS, and zeroes the coarse initial guess.
HYPRE_Int
hypre_BoomerAMGDD_FAC_Restrict( hypre_AMGDDCompGrid *compGrid_f,
hypre_AMGDDCompGrid *compGrid_c,
HYPRE_Int first_iteration )
{
// Recalculate residual on coarse grid
if (!first_iteration)
{
// f_{l+1} <- f_{l+1} - A_{l+1} u_{l+1}
hypre_AMGDDCompGridMatvec(-1.0, hypre_AMGDDCompGridA(compGrid_c),
hypre_AMGDDCompGridU(compGrid_c),
1.0, hypre_AMGDDCompGridF(compGrid_c));
}
// Get update: s_l <- A_lt_l + s_l
hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridA(compGrid_f),
hypre_AMGDDCompGridT(compGrid_f),
1.0, hypre_AMGDDCompGridS(compGrid_f));
// If we need to preserve the updates on the next level
if (hypre_AMGDDCompGridS(compGrid_c))
{
// s_{l+1} <- R_l s_l (overwrites the coarse update vector)
hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridR(compGrid_f),
hypre_AMGDDCompGridS(compGrid_f),
0.0, hypre_AMGDDCompGridS(compGrid_c));
// Subtract restricted update from recalculated residual: f_{l+1} <- f_{l+1} - s_{l+1}
hypre_AMGDDCompGridVectorAxpy(-1.0, hypre_AMGDDCompGridS(compGrid_c), hypre_AMGDDCompGridF(compGrid_c));
}
else
{
// Restrict and subtract update from recalculated residual: f_{l+1} <- f_{l+1} - P_l^Ts_l
hypre_AMGDDCompGridMatvec(-1.0, hypre_AMGDDCompGridR(compGrid_f),
hypre_AMGDDCompGridS(compGrid_f),
1.0, hypre_AMGDDCompGridF(compGrid_c));
}
// Zero out initial guess on coarse grid
hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridU(compGrid_c), 0.0);
return hypre_error_flag;
}
// Runs numRelax sweeps of the user-supplied FAC relaxation on `level`,
// forwarding `cycle_param` to it. When the update accumulators T and/or Q
// exist, the net solution change (u_after - u_before) is computed via the
// Temp vector and added into them.
HYPRE_Int
hypre_BoomerAMGDD_FAC_Relax( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
HYPRE_Int numRelax = hypre_ParAMGDDDataFACNumRelax(amgdd_data);
HYPRE_Int i;
if (hypre_AMGDDCompGridT(compGrid) || hypre_AMGDDCompGridQ(compGrid))
{
// Temp <- -u (so that Temp + u_after equals the net update)
hypre_AMGDDCompGridVectorCopy(hypre_AMGDDCompGridU(compGrid),
hypre_AMGDDCompGridTemp(compGrid));
hypre_AMGDDCompGridVectorScale(-1.0, hypre_AMGDDCompGridTemp(compGrid));
}
for (i = 0; i < numRelax; i++)
{
// Invoke the user relaxation callback stored in the AMG-DD data
(*hypre_ParAMGDDDataUserFACRelaxation(amgdd_data))(amgdd_vdata, level, cycle_param);
}
if (hypre_AMGDDCompGridT(compGrid) || hypre_AMGDDCompGridQ(compGrid))
{
// Temp <- u_after - u_before
hypre_AMGDDCompGridVectorAxpy(1.0,
hypre_AMGDDCompGridU(compGrid),
hypre_AMGDDCompGridTemp(compGrid));
if (hypre_AMGDDCompGridT(compGrid))
{
hypre_AMGDDCompGridVectorAxpy(1.0,
hypre_AMGDDCompGridTemp(compGrid),
hypre_AMGDDCompGridT(compGrid));
}
if (hypre_AMGDDCompGridQ(compGrid))
{
hypre_AMGDDCompGridVectorAxpy(1.0,
hypre_AMGDDCompGridTemp(compGrid),
hypre_AMGDDCompGridQ(compGrid));
}
}
return hypre_error_flag;
}
// Jacobi relaxation dispatcher: runs the device implementation when built
// with CUDA/HIP and the comp grid lives in device memory, the host
// implementation otherwise. `cycle_param` is accepted for signature
// compatibility with the other relaxation routines but is not used here.
HYPRE_Int
hypre_BoomerAMGDD_FAC_Jacobi( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid);
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(memory_location);
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_BoomerAMGDD_FAC_JacobiDevice(amgdd_vdata, level);
}
else
#endif
{
hypre_BoomerAMGDD_FAC_JacobiHost(amgdd_vdata, level);
}
return hypre_error_flag;
}
// Host weighted-Jacobi sweep: u <- u + w * D^{-1} (f - A u), applied to
// owned nodes and nonowned *real* nodes. On first use, the per-row scaling
// vector (stored in L1Norms) and the Temp2 work vector are lazily created.
// NOTE(review): despite the name, L1Norms currently holds the plain diagonal
// of A, not l1 row norms — see the commented-out accumulation lines below.
HYPRE_Int
hypre_BoomerAMGDD_FAC_JacobiHost( void *amgdd_vdata,
HYPRE_Int level )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
HYPRE_Real relax_weight = hypre_ParAMGDDDataFACRelaxWeight(amgdd_data);
HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid);
hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid);
hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid);
hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid);
hypre_CSRMatrix *diag;
HYPRE_Int total_real_nodes;
HYPRE_Int i, j;
// Calculate l1_norms if necessary (right now, I'm just using this vector for the diagonal of A and doing straight ahead Jacobi)
if (!hypre_AMGDDCompGridL1Norms(compGrid))
{
total_real_nodes = hypre_AMGDDCompGridNumOwnedNodes(compGrid) +
hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid);
hypre_AMGDDCompGridL1Norms(compGrid) = hypre_CTAlloc(HYPRE_Real,
total_real_nodes,
memory_location);
// Extract the diagonal entries of the owned-diag block
diag = hypre_AMGDDCompGridMatrixOwnedDiag(A);
for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
{
for (j = hypre_CSRMatrixI(diag)[i]; j < hypre_CSRMatrixI(diag)[i+1]; j++)
{
// hypre_AMGDDCompGridL1Norms(compGrid)[i] += fabs(hypre_CSRMatrixData(diag)[j]);
if (hypre_CSRMatrixJ(diag)[j] == i)
{
hypre_AMGDDCompGridL1Norms(compGrid)[i] = hypre_CSRMatrixData(diag)[j];
}
}
}
// Same for the nonowned-diag block; entries are stored after the owned ones
diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A);
for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++)
{
for (j = hypre_CSRMatrixI(diag)[i]; j < hypre_CSRMatrixI(diag)[i+1]; j++)
{
// hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] += fabs(hypre_CSRMatrixData(diag)[j]);
if (hypre_CSRMatrixJ(diag)[j] == i)
{
hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] = hypre_CSRMatrixData(diag)[j];
}
}
}
}
// Allocate temporary vector if necessary
if (!hypre_AMGDDCompGridTemp2(compGrid))
{
hypre_AMGDDCompGridTemp2(compGrid) = hypre_AMGDDCompGridVectorCreate();
hypre_AMGDDCompGridVectorInitialize(hypre_AMGDDCompGridTemp2(compGrid),
hypre_AMGDDCompGridNumOwnedNodes(compGrid),
hypre_AMGDDCompGridNumNonOwnedNodes(compGrid),
hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid));
}
// Temp2 <- w * (f - A u)
hypre_AMGDDCompGridVectorCopy(f, hypre_AMGDDCompGridTemp2(compGrid));
hypre_AMGDDCompGridMatvec(-relax_weight, A, u, relax_weight, hypre_AMGDDCompGridTemp2(compGrid));
// u <- u + D^{-1} Temp2 (owned nodes, then nonowned real nodes)
for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
{
hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u))[i] +=
hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridTemp2(compGrid)))[i] /
hypre_AMGDDCompGridL1Norms(compGrid)[i];
}
for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++)
{
hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u))[i] +=
hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridTemp2(compGrid)))[i] /
hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)];
}
return hypre_error_flag;
}
// Sequential Gauss-Seidel sweep over the comp grid: owned nodes first (in
// natural order), then nonowned real nodes. Each unknown is updated in place
// using the latest neighbor values, so the loops must stay serial.
// NOTE(review): a zero diagonal only triggers a warning — the division still
// happens and would produce inf/nan; confirm whether callers guarantee
// nonzero diagonals.
HYPRE_Int
hypre_BoomerAMGDD_FAC_GaussSeidel( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid);
hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid);
hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid);
hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(A);
hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(A);
hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A);
hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(A);
HYPRE_Complex *u_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u));
HYPRE_Complex *u_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u));
HYPRE_Complex *f_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(f));
HYPRE_Complex *f_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(f));
HYPRE_Int i, j; // loop variables
HYPRE_Complex diagonal; // placeholder for the diagonal of A
// Do Gauss-Seidel relaxation on the owned nodes
for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
{
// Initialize u as RHS
u_owned_data[i] = f_owned_data[i];
diagonal = 0.0;
// Loop over diag entries
for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i+1]; j++)
{
if (hypre_CSRMatrixJ(owned_diag)[j] == i)
{
diagonal = hypre_CSRMatrixData(owned_diag)[j];
}
else
{
u_owned_data[i] -= hypre_CSRMatrixData(owned_diag)[j] * u_owned_data[ hypre_CSRMatrixJ(owned_diag)[j] ];
}
}
// Loop over offd entries
for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i+1]; j++)
{
u_owned_data[i] -= hypre_CSRMatrixData(owned_offd)[j] * u_nonowned_data[ hypre_CSRMatrixJ(owned_offd)[j] ];
}
// Divide by diagonal
if (diagonal == 0.0)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_GaussSeidel().\n");
}
u_owned_data[i] /= diagonal;
}
// Do Gauss-Seidel relaxation on the nonowned nodes
for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++)
{
// Initialize u as RHS
u_nonowned_data[i] = f_nonowned_data[i];
diagonal = 0.0;
// Loop over diag entries
for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i+1]; j++)
{
if (hypre_CSRMatrixJ(nonowned_diag)[j] == i)
{
diagonal = hypre_CSRMatrixData(nonowned_diag)[j];
}
else
{
u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_diag)[j] * u_nonowned_data[ hypre_CSRMatrixJ(nonowned_diag)[j] ];
}
}
// Loop over offd entries
for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i+1]; j++)
{
u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_offd)[j] * u_owned_data[ hypre_CSRMatrixJ(nonowned_offd)[j] ];
}
// Divide by diagonal
if (diagonal == 0.0)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_GaussSeidel().\n");
}
u_nonowned_data[i] /= diagonal;
}
return hypre_error_flag;
}
// Gauss-Seidel sweep that visits unknowns in a precomputed ordering obtained
// from hypre_topo_sort on the diag blocks (lazily computed and cached on the
// comp grid the first time this routine runs). Nonowned real nodes are
// relaxed first, then owned nodes. As with the plain Gauss-Seidel above, a
// zero diagonal only warns before the division.
HYPRE_Int
hypre_BoomerAMGDD_FAC_OrderedGaussSeidel( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid);
hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid);
hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid);
HYPRE_Int unordered_i, i, j; // loop variables
HYPRE_Complex diagonal; // placeholder for the diagonal of A
// Lazily build and cache the relaxation ordering for the owned nodes
if (!hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid))
{
hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid) = hypre_CTAlloc(HYPRE_Int,
hypre_AMGDDCompGridNumOwnedNodes(compGrid),
hypre_AMGDDCompGridMemoryLocation(compGrid));
hypre_topo_sort(hypre_CSRMatrixI(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_CSRMatrixJ(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_CSRMatrixData(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid),
hypre_AMGDDCompGridNumOwnedNodes(compGrid));
}
// Lazily build and cache the relaxation ordering for the nonowned nodes
if (!hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid))
{
hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid) = hypre_CTAlloc(HYPRE_Int,
hypre_AMGDDCompGridNumNonOwnedNodes(compGrid),
hypre_AMGDDCompGridMemoryLocation(compGrid));
hypre_topo_sort(hypre_CSRMatrixI(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_CSRMatrixJ(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_CSRMatrixData(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))),
hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid),
hypre_AMGDDCompGridNumNonOwnedNodes(compGrid));
}
// Get all the info
HYPRE_Complex *u_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u));
HYPRE_Complex *u_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u));
HYPRE_Complex *f_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(f));
HYPRE_Complex *f_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(f));
hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(A);
hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(A);
hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A);
hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(A);
// Do Gauss-Seidel relaxation on the nonowned real nodes
for (unordered_i = 0; unordered_i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); unordered_i++)
{
// Map position in the sweep to the actual unknown index
i = hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid)[unordered_i];
// Initialize u as RHS
u_nonowned_data[i] = f_nonowned_data[i];
diagonal = 0.0;
// Loop over diag entries
for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i+1]; j++)
{
if (hypre_CSRMatrixJ(nonowned_diag)[j] == i)
{
diagonal = hypre_CSRMatrixData(nonowned_diag)[j];
}
else
{
u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_diag)[j] * u_nonowned_data[ hypre_CSRMatrixJ(nonowned_diag)[j] ];
}
}
// Loop over offd entries
for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i+1]; j++)
{
u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_offd)[j] * u_owned_data[ hypre_CSRMatrixJ(nonowned_offd)[j] ];
}
// Divide by diagonal
if (diagonal == 0.0)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_OrderedGaussSeidel().\n");
}
u_nonowned_data[i] /= diagonal;
}
// Do Gauss-Seidel relaxation on the owned nodes
for (unordered_i = 0; unordered_i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); unordered_i++)
{
i = hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid)[unordered_i];
// Initialize u as RHS
u_owned_data[i] = f_owned_data[i];
diagonal = 0.0;
// Loop over diag entries
for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i+1]; j++)
{
if (hypre_CSRMatrixJ(owned_diag)[j] == i)
{
diagonal = hypre_CSRMatrixData(owned_diag)[j];
}
else
{
u_owned_data[i] -= hypre_CSRMatrixData(owned_diag)[j] * u_owned_data[ hypre_CSRMatrixJ(owned_diag)[j] ];
}
}
// Loop over offd entries
for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i+1]; j++)
{
u_owned_data[i] -= hypre_CSRMatrixData(owned_offd)[j] * u_nonowned_data[ hypre_CSRMatrixJ(owned_offd)[j] ];
}
// Divide by diagonal
if (diagonal == 0.0)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_OrderedGaussSeidel().\n");
}
u_owned_data[i] /= diagonal;
}
return hypre_error_flag;
}
// CF-ordered L1-Jacobi dispatcher. cycle_param selects the sweep order:
// 1 -> relax_set 1 then 0, 2 -> relax_set 0 then 1, otherwise a single
// sweep with relax_set 0. (relax_set is matched against the CF marker;
// presumably 1 = C-points, 0 = F-points — confirm against the marker setup.)
// Runs on device when built with CUDA/HIP and the data lives there.
HYPRE_Int
hypre_BoomerAMGDD_FAC_CFL1Jacobi( void *amgdd_vdata,
HYPRE_Int level,
HYPRE_Int cycle_param )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid);
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(memory_location);
if (exec == HYPRE_EXEC_DEVICE)
{
if (cycle_param == 1)
{
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 1);
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 0);
}
else if (cycle_param == 2)
{
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 0);
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 1);
}
else
{
hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 0);
}
}
else
#endif
{
if (cycle_param == 1)
{
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 1);
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 0);
}
else if (cycle_param == 2)
{
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 0);
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 1);
}
else
{
hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 0);
}
}
return hypre_error_flag;
}
/**
 * Host implementation of one L1-Jacobi sweep restricted to the points whose
 * CF marker equals `relax_set`: u_i <- u_i + w * (f - A u_old)_i / l1_norm_i.
 * The current approximation is first snapshotted into Temp2 (created lazily
 * on first call), so every update reads only the `*_tmp` copies — the sweep
 * is a true Jacobi iteration and each i-iteration is independent.
 *
 * Consistency fix: the nonowned-real-node loop was missing the OpenMP
 * worksharing pragma that the structurally identical owned-node loop has.
 * Both loops write only their own u[i] and read only the tmp snapshot, so
 * parallelizing the second loop is safe and matches the first.
 *
 * NOTE(review): assumes hypre_AMGDDCompGridL1Norms has already been
 * populated (e.g. by setup or by a prior Jacobi call) — confirm callers.
 */
HYPRE_Int
hypre_BoomerAMGDD_FAC_CFL1JacobiHost( void *amgdd_vdata,
                                      HYPRE_Int level,
                                      HYPRE_Int relax_set )
{
   hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata;
   hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level];
   HYPRE_Real relax_weight = hypre_ParAMGDDDataFACRelaxWeight(amgdd_data);

   hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid));
   hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(hypre_AMGDDCompGridA(compGrid));
   hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid));
   hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(hypre_AMGDDCompGridA(compGrid));

   HYPRE_Complex *owned_u = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridU(compGrid)));
   HYPRE_Complex *nonowned_u = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridU(compGrid)));
   HYPRE_Complex *owned_f = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridF(compGrid)));
   HYPRE_Complex *nonowned_f = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridF(compGrid)));

   HYPRE_Real *l1_norms = hypre_AMGDDCompGridL1Norms(compGrid);
   HYPRE_Int  *cf_marker = hypre_AMGDDCompGridCFMarkerArray(compGrid);

   HYPRE_Complex *owned_tmp;
   HYPRE_Complex *nonowned_tmp;

   HYPRE_Int  i, j;
   HYPRE_Real res;

   /*-----------------------------------------------------------------
    * Create and initialize Temp2 vector if not done before.
    *-----------------------------------------------------------------*/
   if (!hypre_AMGDDCompGridTemp2(compGrid))
   {
      hypre_AMGDDCompGridTemp2(compGrid) = hypre_AMGDDCompGridVectorCreate();
      hypre_AMGDDCompGridVectorInitialize(hypre_AMGDDCompGridTemp2(compGrid),
                                          hypre_AMGDDCompGridNumOwnedNodes(compGrid),
                                          hypre_AMGDDCompGridNumNonOwnedNodes(compGrid),
                                          hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid));
   }
   owned_tmp = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridTemp2(compGrid)));
   nonowned_tmp = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridTemp2(compGrid)));

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
   {
      owned_tmp[i] = owned_u[i];
   }
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedNodes(compGrid); i++)
   {
      nonowned_tmp[i] = nonowned_u[i];
   }

   /*-----------------------------------------------------------------
    * Relax only C or F points as determined by relax_set.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,res) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++)
   {
      if (cf_marker[i] == relax_set)
      {
         res = owned_f[i];
         for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i+1]; j++)
         {
            res -= hypre_CSRMatrixData(owned_diag)[j] * owned_tmp[ hypre_CSRMatrixJ(owned_diag)[j] ];
         }
         for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i+1]; j++)
         {
            res -= hypre_CSRMatrixData(owned_offd)[j] * nonowned_tmp[ hypre_CSRMatrixJ(owned_offd)[j] ];
         }
         owned_u[i] += (relax_weight * res)/l1_norms[i];
      }
   }
   /* Same sweep over the nonowned real nodes; reads only the tmp snapshot,
    * so it parallelizes exactly like the owned loop above (pragma added). */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,res) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++)
   {
      if (cf_marker[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] == relax_set)
      {
         res = nonowned_f[i];
         for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i+1]; j++)
         {
            res -= hypre_CSRMatrixData(nonowned_diag)[j] * nonowned_tmp[ hypre_CSRMatrixJ(nonowned_diag)[j] ];
         }
         for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i+1]; j++)
         {
            res -= hypre_CSRMatrixData(nonowned_offd)[j] * owned_tmp[ hypre_CSRMatrixJ(nonowned_offd)[j] ];
         }
         nonowned_u[i] += (relax_weight * res)/l1_norms[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)];
      }
   }

   return hypre_error_flag;
}
|
mapc.c |
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <memory.h>
#include <math.h>
#include <float.h>
#include <assert.h>
#include <omp.h>
/*
//////////////////////////////////////////////////////
Example: create a 256x256 float image with 1 component:
struct m_image foo1 = M_IMAGE_IDENTITY();
struct m_image foo2 = M_IMAGE_IDENTITY();
int x, y;
m_image_create(&foo1, M_FLOAT, 256, 256, 1);
memset(foo1.data, 0, foo1.size * sizeof(float)); // clear to zero
y = 128; x = 128;
((float *)foo1.data)[y * foo1.width + x] = 1.0f; // set (x, y) pixel to one
m_image_gaussian_blur(&foo2, &foo1, 3, 3); // apply Gaussian blur
m_image_destroy(&foo2);
m_image_destroy(&foo1);
*/
/* Generic image container: a width x height raster with `comp` interleaved
 * components per pixel, stored in `data` with the element type given by
 * `type` (one of the M_* constants below). */
struct m_image
{
void *data; /* pixel buffer; element type determined by `type` */
int size; /* total element count (see usage: foo1.size * sizeof(float)) */
int width; /* image width in pixels */
int height; /* image height in pixels */
int comp; /* components per pixel (e.g. 1 = grayscale) */
char type; /* element type tag: M_VOID .. M_DOUBLE */
};
#define M_VOID 0
#define M_BOOL 1
#define M_BYTE 2
#define M_UBYTE 3
#define M_SHORT 4
#define M_USHORT 5
#define M_INT 6
#define M_UINT 7
#define M_HALF 8
#define M_FLOAT 9
#define M_DOUBLE 10
#ifndef M_SAFE_FREE
#define M_SAFE_FREE(p) {if (p) {free(p); (p) = NULL;}}
#endif
/* m_image type util */
int m_type_sizeof(char type);
/* fully supported types are: M_UBYTE, M_USHORT, M_HALF, M_FLOAT
partially supported types: M_BYTE, M_SHORT, M_INT, M_UINT (no support for conversion) */
void m_image_create(struct m_image *image, char type_, int width, int height, int comp);
void m_image_destroy(struct m_image *image);
void inline m_flip_buffer(struct m_image *src, struct m_image *dest);
int inline vmap_buffer_c(int index, int width, int height, int depth);
void inline test_array_inplace(struct m_image *src);
void inline test_rgb_inplace(struct m_image *src, struct m_image *red, struct m_image *green, struct m_image *blue);
/* Lookup table of 64 float bit patterns, indexed by a half-float's
 * sign + exponent bits.  NOTE(review): this looks like the exponent table
 * of a table-driven half->float converter (cf. van der Zijp, "Fast Half
 * Float Conversions"); the companion 2048-entry m__mantissa table follows.
 * Entries 31 and 63 (0x47800000 / 0xc7800000 = +/-65536.0f) appear to be
 * the Inf/NaN buckets.  The conversion code is not visible in this chunk —
 * TODO confirm against the routines that consume these tables. */
uint32_t m__exponent[64] = {
0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000,
0x03000000, 0x03800000, 0x04000000, 0x04800000, 0x05000000, 0x05800000,
0x06000000, 0x06800000, 0x07000000, 0x07800000, 0x08000000, 0x08800000,
0x09000000, 0x09800000, 0x0a000000, 0x0a800000, 0x0b000000, 0x0b800000,
0x0c000000, 0x0c800000, 0x0d000000, 0x0d800000, 0x0e000000, 0x0e800000,
0x0f000000, 0x47800000, 0x80000000, 0x80800000, 0x81000000, 0x81800000,
0x82000000, 0x82800000, 0x83000000, 0x83800000, 0x84000000, 0x84800000,
0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000,
0x88000000, 0x88800000, 0x89000000, 0x89800000, 0x8a000000, 0x8a800000,
0x8b000000, 0x8b800000, 0x8c000000, 0x8c800000, 0x8d000000, 0x8d800000,
0x8e000000, 0x8e800000, 0x8f000000, 0xc7800000
};
uint32_t m__mantissa[2048] = {
0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34a00000,
0x34c00000, 0x34e00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000,
0x35400000, 0x35500000, 0x35600000, 0x35700000, 0x35800000, 0x35880000,
0x35900000, 0x35980000, 0x35a00000, 0x35a80000, 0x35b00000, 0x35b80000,
0x35c00000, 0x35c80000, 0x35d00000, 0x35d80000, 0x35e00000, 0x35e80000,
0x35f00000, 0x35f80000, 0x36000000, 0x36040000, 0x36080000, 0x360c0000,
0x36100000, 0x36140000, 0x36180000, 0x361c0000, 0x36200000, 0x36240000,
0x36280000, 0x362c0000, 0x36300000, 0x36340000, 0x36380000, 0x363c0000,
0x36400000, 0x36440000, 0x36480000, 0x364c0000, 0x36500000, 0x36540000,
0x36580000, 0x365c0000, 0x36600000, 0x36640000, 0x36680000, 0x366c0000,
0x36700000, 0x36740000, 0x36780000, 0x367c0000, 0x36800000, 0x36820000,
0x36840000, 0x36860000, 0x36880000, 0x368a0000, 0x368c0000, 0x368e0000,
0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369a0000,
0x369c0000, 0x369e0000, 0x36a00000, 0x36a20000, 0x36a40000, 0x36a60000,
0x36a80000, 0x36aa0000, 0x36ac0000, 0x36ae0000, 0x36b00000, 0x36b20000,
0x36b40000, 0x36b60000, 0x36b80000, 0x36ba0000, 0x36bc0000, 0x36be0000,
0x36c00000, 0x36c20000, 0x36c40000, 0x36c60000, 0x36c80000, 0x36ca0000,
0x36cc0000, 0x36ce0000, 0x36d00000, 0x36d20000, 0x36d40000, 0x36d60000,
0x36d80000, 0x36da0000, 0x36dc0000, 0x36de0000, 0x36e00000, 0x36e20000,
0x36e40000, 0x36e60000, 0x36e80000, 0x36ea0000, 0x36ec0000, 0x36ee0000,
0x36f00000, 0x36f20000, 0x36f40000, 0x36f60000, 0x36f80000, 0x36fa0000,
0x36fc0000, 0x36fe0000, 0x37000000, 0x37010000, 0x37020000, 0x37030000,
0x37040000, 0x37050000, 0x37060000, 0x37070000, 0x37080000, 0x37090000,
0x370a0000, 0x370b0000, 0x370c0000, 0x370d0000, 0x370e0000, 0x370f0000,
0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000,
0x37160000, 0x37170000, 0x37180000, 0x37190000, 0x371a0000, 0x371b0000,
0x371c0000, 0x371d0000, 0x371e0000, 0x371f0000, 0x37200000, 0x37210000,
0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000,
0x37280000, 0x37290000, 0x372a0000, 0x372b0000, 0x372c0000, 0x372d0000,
0x372e0000, 0x372f0000, 0x37300000, 0x37310000, 0x37320000, 0x37330000,
0x37340000, 0x37350000, 0x37360000, 0x37370000, 0x37380000, 0x37390000,
0x373a0000, 0x373b0000, 0x373c0000, 0x373d0000, 0x373e0000, 0x373f0000,
0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000,
0x37460000, 0x37470000, 0x37480000, 0x37490000, 0x374a0000, 0x374b0000,
0x374c0000, 0x374d0000, 0x374e0000, 0x374f0000, 0x37500000, 0x37510000,
0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000,
0x37580000, 0x37590000, 0x375a0000, 0x375b0000, 0x375c0000, 0x375d0000,
0x375e0000, 0x375f0000, 0x37600000, 0x37610000, 0x37620000, 0x37630000,
0x37640000, 0x37650000, 0x37660000, 0x37670000, 0x37680000, 0x37690000,
0x376a0000, 0x376b0000, 0x376c0000, 0x376d0000, 0x376e0000, 0x376f0000,
0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000,
0x37760000, 0x37770000, 0x37780000, 0x37790000, 0x377a0000, 0x377b0000,
0x377c0000, 0x377d0000, 0x377e0000, 0x377f0000, 0x37800000, 0x37808000,
0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000,
0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000,
0x37870000, 0x37878000, 0x37880000, 0x37888000, 0x37890000, 0x37898000,
0x378a0000, 0x378a8000, 0x378b0000, 0x378b8000, 0x378c0000, 0x378c8000,
0x378d0000, 0x378d8000, 0x378e0000, 0x378e8000, 0x378f0000, 0x378f8000,
0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000,
0x37930000, 0x37938000, 0x37940000, 0x37948000, 0x37950000, 0x37958000,
0x37960000, 0x37968000, 0x37970000, 0x37978000, 0x37980000, 0x37988000,
0x37990000, 0x37998000, 0x379a0000, 0x379a8000, 0x379b0000, 0x379b8000,
0x379c0000, 0x379c8000, 0x379d0000, 0x379d8000, 0x379e0000, 0x379e8000,
0x379f0000, 0x379f8000, 0x37a00000, 0x37a08000, 0x37a10000, 0x37a18000,
0x37a20000, 0x37a28000, 0x37a30000, 0x37a38000, 0x37a40000, 0x37a48000,
0x37a50000, 0x37a58000, 0x37a60000, 0x37a68000, 0x37a70000, 0x37a78000,
0x37a80000, 0x37a88000, 0x37a90000, 0x37a98000, 0x37aa0000, 0x37aa8000,
0x37ab0000, 0x37ab8000, 0x37ac0000, 0x37ac8000, 0x37ad0000, 0x37ad8000,
0x37ae0000, 0x37ae8000, 0x37af0000, 0x37af8000, 0x37b00000, 0x37b08000,
0x37b10000, 0x37b18000, 0x37b20000, 0x37b28000, 0x37b30000, 0x37b38000,
0x37b40000, 0x37b48000, 0x37b50000, 0x37b58000, 0x37b60000, 0x37b68000,
0x37b70000, 0x37b78000, 0x37b80000, 0x37b88000, 0x37b90000, 0x37b98000,
0x37ba0000, 0x37ba8000, 0x37bb0000, 0x37bb8000, 0x37bc0000, 0x37bc8000,
0x37bd0000, 0x37bd8000, 0x37be0000, 0x37be8000, 0x37bf0000, 0x37bf8000,
0x37c00000, 0x37c08000, 0x37c10000, 0x37c18000, 0x37c20000, 0x37c28000,
0x37c30000, 0x37c38000, 0x37c40000, 0x37c48000, 0x37c50000, 0x37c58000,
0x37c60000, 0x37c68000, 0x37c70000, 0x37c78000, 0x37c80000, 0x37c88000,
0x37c90000, 0x37c98000, 0x37ca0000, 0x37ca8000, 0x37cb0000, 0x37cb8000,
0x37cc0000, 0x37cc8000, 0x37cd0000, 0x37cd8000, 0x37ce0000, 0x37ce8000,
0x37cf0000, 0x37cf8000, 0x37d00000, 0x37d08000, 0x37d10000, 0x37d18000,
0x37d20000, 0x37d28000, 0x37d30000, 0x37d38000, 0x37d40000, 0x37d48000,
0x37d50000, 0x37d58000, 0x37d60000, 0x37d68000, 0x37d70000, 0x37d78000,
0x37d80000, 0x37d88000, 0x37d90000, 0x37d98000, 0x37da0000, 0x37da8000,
0x37db0000, 0x37db8000, 0x37dc0000, 0x37dc8000, 0x37dd0000, 0x37dd8000,
0x37de0000, 0x37de8000, 0x37df0000, 0x37df8000, 0x37e00000, 0x37e08000,
0x37e10000, 0x37e18000, 0x37e20000, 0x37e28000, 0x37e30000, 0x37e38000,
0x37e40000, 0x37e48000, 0x37e50000, 0x37e58000, 0x37e60000, 0x37e68000,
0x37e70000, 0x37e78000, 0x37e80000, 0x37e88000, 0x37e90000, 0x37e98000,
0x37ea0000, 0x37ea8000, 0x37eb0000, 0x37eb8000, 0x37ec0000, 0x37ec8000,
0x37ed0000, 0x37ed8000, 0x37ee0000, 0x37ee8000, 0x37ef0000, 0x37ef8000,
0x37f00000, 0x37f08000, 0x37f10000, 0x37f18000, 0x37f20000, 0x37f28000,
0x37f30000, 0x37f38000, 0x37f40000, 0x37f48000, 0x37f50000, 0x37f58000,
0x37f60000, 0x37f68000, 0x37f70000, 0x37f78000, 0x37f80000, 0x37f88000,
0x37f90000, 0x37f98000, 0x37fa0000, 0x37fa8000, 0x37fb0000, 0x37fb8000,
0x37fc0000, 0x37fc8000, 0x37fd0000, 0x37fd8000, 0x37fe0000, 0x37fe8000,
0x37ff0000, 0x37ff8000, 0x38000000, 0x38004000, 0x38008000, 0x3800c000,
0x38010000, 0x38014000, 0x38018000, 0x3801c000, 0x38020000, 0x38024000,
0x38028000, 0x3802c000, 0x38030000, 0x38034000, 0x38038000, 0x3803c000,
0x38040000, 0x38044000, 0x38048000, 0x3804c000, 0x38050000, 0x38054000,
0x38058000, 0x3805c000, 0x38060000, 0x38064000, 0x38068000, 0x3806c000,
0x38070000, 0x38074000, 0x38078000, 0x3807c000, 0x38080000, 0x38084000,
0x38088000, 0x3808c000, 0x38090000, 0x38094000, 0x38098000, 0x3809c000,
0x380a0000, 0x380a4000, 0x380a8000, 0x380ac000, 0x380b0000, 0x380b4000,
0x380b8000, 0x380bc000, 0x380c0000, 0x380c4000, 0x380c8000, 0x380cc000,
0x380d0000, 0x380d4000, 0x380d8000, 0x380dc000, 0x380e0000, 0x380e4000,
0x380e8000, 0x380ec000, 0x380f0000, 0x380f4000, 0x380f8000, 0x380fc000,
0x38100000, 0x38104000, 0x38108000, 0x3810c000, 0x38110000, 0x38114000,
0x38118000, 0x3811c000, 0x38120000, 0x38124000, 0x38128000, 0x3812c000,
0x38130000, 0x38134000, 0x38138000, 0x3813c000, 0x38140000, 0x38144000,
0x38148000, 0x3814c000, 0x38150000, 0x38154000, 0x38158000, 0x3815c000,
0x38160000, 0x38164000, 0x38168000, 0x3816c000, 0x38170000, 0x38174000,
0x38178000, 0x3817c000, 0x38180000, 0x38184000, 0x38188000, 0x3818c000,
0x38190000, 0x38194000, 0x38198000, 0x3819c000, 0x381a0000, 0x381a4000,
0x381a8000, 0x381ac000, 0x381b0000, 0x381b4000, 0x381b8000, 0x381bc000,
0x381c0000, 0x381c4000, 0x381c8000, 0x381cc000, 0x381d0000, 0x381d4000,
0x381d8000, 0x381dc000, 0x381e0000, 0x381e4000, 0x381e8000, 0x381ec000,
0x381f0000, 0x381f4000, 0x381f8000, 0x381fc000, 0x38200000, 0x38204000,
0x38208000, 0x3820c000, 0x38210000, 0x38214000, 0x38218000, 0x3821c000,
0x38220000, 0x38224000, 0x38228000, 0x3822c000, 0x38230000, 0x38234000,
0x38238000, 0x3823c000, 0x38240000, 0x38244000, 0x38248000, 0x3824c000,
0x38250000, 0x38254000, 0x38258000, 0x3825c000, 0x38260000, 0x38264000,
0x38268000, 0x3826c000, 0x38270000, 0x38274000, 0x38278000, 0x3827c000,
0x38280000, 0x38284000, 0x38288000, 0x3828c000, 0x38290000, 0x38294000,
0x38298000, 0x3829c000, 0x382a0000, 0x382a4000, 0x382a8000, 0x382ac000,
0x382b0000, 0x382b4000, 0x382b8000, 0x382bc000, 0x382c0000, 0x382c4000,
0x382c8000, 0x382cc000, 0x382d0000, 0x382d4000, 0x382d8000, 0x382dc000,
0x382e0000, 0x382e4000, 0x382e8000, 0x382ec000, 0x382f0000, 0x382f4000,
0x382f8000, 0x382fc000, 0x38300000, 0x38304000, 0x38308000, 0x3830c000,
0x38310000, 0x38314000, 0x38318000, 0x3831c000, 0x38320000, 0x38324000,
0x38328000, 0x3832c000, 0x38330000, 0x38334000, 0x38338000, 0x3833c000,
0x38340000, 0x38344000, 0x38348000, 0x3834c000, 0x38350000, 0x38354000,
0x38358000, 0x3835c000, 0x38360000, 0x38364000, 0x38368000, 0x3836c000,
0x38370000, 0x38374000, 0x38378000, 0x3837c000, 0x38380000, 0x38384000,
0x38388000, 0x3838c000, 0x38390000, 0x38394000, 0x38398000, 0x3839c000,
0x383a0000, 0x383a4000, 0x383a8000, 0x383ac000, 0x383b0000, 0x383b4000,
0x383b8000, 0x383bc000, 0x383c0000, 0x383c4000, 0x383c8000, 0x383cc000,
0x383d0000, 0x383d4000, 0x383d8000, 0x383dc000, 0x383e0000, 0x383e4000,
0x383e8000, 0x383ec000, 0x383f0000, 0x383f4000, 0x383f8000, 0x383fc000,
0x38400000, 0x38404000, 0x38408000, 0x3840c000, 0x38410000, 0x38414000,
0x38418000, 0x3841c000, 0x38420000, 0x38424000, 0x38428000, 0x3842c000,
0x38430000, 0x38434000, 0x38438000, 0x3843c000, 0x38440000, 0x38444000,
0x38448000, 0x3844c000, 0x38450000, 0x38454000, 0x38458000, 0x3845c000,
0x38460000, 0x38464000, 0x38468000, 0x3846c000, 0x38470000, 0x38474000,
0x38478000, 0x3847c000, 0x38480000, 0x38484000, 0x38488000, 0x3848c000,
0x38490000, 0x38494000, 0x38498000, 0x3849c000, 0x384a0000, 0x384a4000,
0x384a8000, 0x384ac000, 0x384b0000, 0x384b4000, 0x384b8000, 0x384bc000,
0x384c0000, 0x384c4000, 0x384c8000, 0x384cc000, 0x384d0000, 0x384d4000,
0x384d8000, 0x384dc000, 0x384e0000, 0x384e4000, 0x384e8000, 0x384ec000,
0x384f0000, 0x384f4000, 0x384f8000, 0x384fc000, 0x38500000, 0x38504000,
0x38508000, 0x3850c000, 0x38510000, 0x38514000, 0x38518000, 0x3851c000,
0x38520000, 0x38524000, 0x38528000, 0x3852c000, 0x38530000, 0x38534000,
0x38538000, 0x3853c000, 0x38540000, 0x38544000, 0x38548000, 0x3854c000,
0x38550000, 0x38554000, 0x38558000, 0x3855c000, 0x38560000, 0x38564000,
0x38568000, 0x3856c000, 0x38570000, 0x38574000, 0x38578000, 0x3857c000,
0x38580000, 0x38584000, 0x38588000, 0x3858c000, 0x38590000, 0x38594000,
0x38598000, 0x3859c000, 0x385a0000, 0x385a4000, 0x385a8000, 0x385ac000,
0x385b0000, 0x385b4000, 0x385b8000, 0x385bc000, 0x385c0000, 0x385c4000,
0x385c8000, 0x385cc000, 0x385d0000, 0x385d4000, 0x385d8000, 0x385dc000,
0x385e0000, 0x385e4000, 0x385e8000, 0x385ec000, 0x385f0000, 0x385f4000,
0x385f8000, 0x385fc000, 0x38600000, 0x38604000, 0x38608000, 0x3860c000,
0x38610000, 0x38614000, 0x38618000, 0x3861c000, 0x38620000, 0x38624000,
0x38628000, 0x3862c000, 0x38630000, 0x38634000, 0x38638000, 0x3863c000,
0x38640000, 0x38644000, 0x38648000, 0x3864c000, 0x38650000, 0x38654000,
0x38658000, 0x3865c000, 0x38660000, 0x38664000, 0x38668000, 0x3866c000,
0x38670000, 0x38674000, 0x38678000, 0x3867c000, 0x38680000, 0x38684000,
0x38688000, 0x3868c000, 0x38690000, 0x38694000, 0x38698000, 0x3869c000,
0x386a0000, 0x386a4000, 0x386a8000, 0x386ac000, 0x386b0000, 0x386b4000,
0x386b8000, 0x386bc000, 0x386c0000, 0x386c4000, 0x386c8000, 0x386cc000,
0x386d0000, 0x386d4000, 0x386d8000, 0x386dc000, 0x386e0000, 0x386e4000,
0x386e8000, 0x386ec000, 0x386f0000, 0x386f4000, 0x386f8000, 0x386fc000,
0x38700000, 0x38704000, 0x38708000, 0x3870c000, 0x38710000, 0x38714000,
0x38718000, 0x3871c000, 0x38720000, 0x38724000, 0x38728000, 0x3872c000,
0x38730000, 0x38734000, 0x38738000, 0x3873c000, 0x38740000, 0x38744000,
0x38748000, 0x3874c000, 0x38750000, 0x38754000, 0x38758000, 0x3875c000,
0x38760000, 0x38764000, 0x38768000, 0x3876c000, 0x38770000, 0x38774000,
0x38778000, 0x3877c000, 0x38780000, 0x38784000, 0x38788000, 0x3878c000,
0x38790000, 0x38794000, 0x38798000, 0x3879c000, 0x387a0000, 0x387a4000,
0x387a8000, 0x387ac000, 0x387b0000, 0x387b4000, 0x387b8000, 0x387bc000,
0x387c0000, 0x387c4000, 0x387c8000, 0x387cc000, 0x387d0000, 0x387d4000,
0x387d8000, 0x387dc000, 0x387e0000, 0x387e4000, 0x387e8000, 0x387ec000,
0x387f0000, 0x387f4000, 0x387f8000, 0x387fc000, 0x38000000, 0x38002000,
0x38004000, 0x38006000, 0x38008000, 0x3800a000, 0x3800c000, 0x3800e000,
0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801a000,
0x3801c000, 0x3801e000, 0x38020000, 0x38022000, 0x38024000, 0x38026000,
0x38028000, 0x3802a000, 0x3802c000, 0x3802e000, 0x38030000, 0x38032000,
0x38034000, 0x38036000, 0x38038000, 0x3803a000, 0x3803c000, 0x3803e000,
0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804a000,
0x3804c000, 0x3804e000, 0x38050000, 0x38052000, 0x38054000, 0x38056000,
0x38058000, 0x3805a000, 0x3805c000, 0x3805e000, 0x38060000, 0x38062000,
0x38064000, 0x38066000, 0x38068000, 0x3806a000, 0x3806c000, 0x3806e000,
0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807a000,
0x3807c000, 0x3807e000, 0x38080000, 0x38082000, 0x38084000, 0x38086000,
0x38088000, 0x3808a000, 0x3808c000, 0x3808e000, 0x38090000, 0x38092000,
0x38094000, 0x38096000, 0x38098000, 0x3809a000, 0x3809c000, 0x3809e000,
0x380a0000, 0x380a2000, 0x380a4000, 0x380a6000, 0x380a8000, 0x380aa000,
0x380ac000, 0x380ae000, 0x380b0000, 0x380b2000, 0x380b4000, 0x380b6000,
0x380b8000, 0x380ba000, 0x380bc000, 0x380be000, 0x380c0000, 0x380c2000,
0x380c4000, 0x380c6000, 0x380c8000, 0x380ca000, 0x380cc000, 0x380ce000,
0x380d0000, 0x380d2000, 0x380d4000, 0x380d6000, 0x380d8000, 0x380da000,
0x380dc000, 0x380de000, 0x380e0000, 0x380e2000, 0x380e4000, 0x380e6000,
0x380e8000, 0x380ea000, 0x380ec000, 0x380ee000, 0x380f0000, 0x380f2000,
0x380f4000, 0x380f6000, 0x380f8000, 0x380fa000, 0x380fc000, 0x380fe000,
0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810a000,
0x3810c000, 0x3810e000, 0x38110000, 0x38112000, 0x38114000, 0x38116000,
0x38118000, 0x3811a000, 0x3811c000, 0x3811e000, 0x38120000, 0x38122000,
0x38124000, 0x38126000, 0x38128000, 0x3812a000, 0x3812c000, 0x3812e000,
0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813a000,
0x3813c000, 0x3813e000, 0x38140000, 0x38142000, 0x38144000, 0x38146000,
0x38148000, 0x3814a000, 0x3814c000, 0x3814e000, 0x38150000, 0x38152000,
0x38154000, 0x38156000, 0x38158000, 0x3815a000, 0x3815c000, 0x3815e000,
0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816a000,
0x3816c000, 0x3816e000, 0x38170000, 0x38172000, 0x38174000, 0x38176000,
0x38178000, 0x3817a000, 0x3817c000, 0x3817e000, 0x38180000, 0x38182000,
0x38184000, 0x38186000, 0x38188000, 0x3818a000, 0x3818c000, 0x3818e000,
0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819a000,
0x3819c000, 0x3819e000, 0x381a0000, 0x381a2000, 0x381a4000, 0x381a6000,
0x381a8000, 0x381aa000, 0x381ac000, 0x381ae000, 0x381b0000, 0x381b2000,
0x381b4000, 0x381b6000, 0x381b8000, 0x381ba000, 0x381bc000, 0x381be000,
0x381c0000, 0x381c2000, 0x381c4000, 0x381c6000, 0x381c8000, 0x381ca000,
0x381cc000, 0x381ce000, 0x381d0000, 0x381d2000, 0x381d4000, 0x381d6000,
0x381d8000, 0x381da000, 0x381dc000, 0x381de000, 0x381e0000, 0x381e2000,
0x381e4000, 0x381e6000, 0x381e8000, 0x381ea000, 0x381ec000, 0x381ee000,
0x381f0000, 0x381f2000, 0x381f4000, 0x381f6000, 0x381f8000, 0x381fa000,
0x381fc000, 0x381fe000, 0x38200000, 0x38202000, 0x38204000, 0x38206000,
0x38208000, 0x3820a000, 0x3820c000, 0x3820e000, 0x38210000, 0x38212000,
0x38214000, 0x38216000, 0x38218000, 0x3821a000, 0x3821c000, 0x3821e000,
0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822a000,
0x3822c000, 0x3822e000, 0x38230000, 0x38232000, 0x38234000, 0x38236000,
0x38238000, 0x3823a000, 0x3823c000, 0x3823e000, 0x38240000, 0x38242000,
0x38244000, 0x38246000, 0x38248000, 0x3824a000, 0x3824c000, 0x3824e000,
0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825a000,
0x3825c000, 0x3825e000, 0x38260000, 0x38262000, 0x38264000, 0x38266000,
0x38268000, 0x3826a000, 0x3826c000, 0x3826e000, 0x38270000, 0x38272000,
0x38274000, 0x38276000, 0x38278000, 0x3827a000, 0x3827c000, 0x3827e000,
0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828a000,
0x3828c000, 0x3828e000, 0x38290000, 0x38292000, 0x38294000, 0x38296000,
0x38298000, 0x3829a000, 0x3829c000, 0x3829e000, 0x382a0000, 0x382a2000,
0x382a4000, 0x382a6000, 0x382a8000, 0x382aa000, 0x382ac000, 0x382ae000,
0x382b0000, 0x382b2000, 0x382b4000, 0x382b6000, 0x382b8000, 0x382ba000,
0x382bc000, 0x382be000, 0x382c0000, 0x382c2000, 0x382c4000, 0x382c6000,
0x382c8000, 0x382ca000, 0x382cc000, 0x382ce000, 0x382d0000, 0x382d2000,
0x382d4000, 0x382d6000, 0x382d8000, 0x382da000, 0x382dc000, 0x382de000,
0x382e0000, 0x382e2000, 0x382e4000, 0x382e6000, 0x382e8000, 0x382ea000,
0x382ec000, 0x382ee000, 0x382f0000, 0x382f2000, 0x382f4000, 0x382f6000,
0x382f8000, 0x382fa000, 0x382fc000, 0x382fe000, 0x38300000, 0x38302000,
0x38304000, 0x38306000, 0x38308000, 0x3830a000, 0x3830c000, 0x3830e000,
0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831a000,
0x3831c000, 0x3831e000, 0x38320000, 0x38322000, 0x38324000, 0x38326000,
0x38328000, 0x3832a000, 0x3832c000, 0x3832e000, 0x38330000, 0x38332000,
0x38334000, 0x38336000, 0x38338000, 0x3833a000, 0x3833c000, 0x3833e000,
0x38340000, 0x38342000, 0x38344000, 0x38346000, 0x38348000, 0x3834a000,
0x3834c000, 0x3834e000, 0x38350000, 0x38352000, 0x38354000, 0x38356000,
0x38358000, 0x3835a000, 0x3835c000, 0x3835e000, 0x38360000, 0x38362000,
0x38364000, 0x38366000, 0x38368000, 0x3836a000, 0x3836c000, 0x3836e000,
0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837a000,
0x3837c000, 0x3837e000, 0x38380000, 0x38382000, 0x38384000, 0x38386000,
0x38388000, 0x3838a000, 0x3838c000, 0x3838e000, 0x38390000, 0x38392000,
0x38394000, 0x38396000, 0x38398000, 0x3839a000, 0x3839c000, 0x3839e000,
0x383a0000, 0x383a2000, 0x383a4000, 0x383a6000, 0x383a8000, 0x383aa000,
0x383ac000, 0x383ae000, 0x383b0000, 0x383b2000, 0x383b4000, 0x383b6000,
0x383b8000, 0x383ba000, 0x383bc000, 0x383be000, 0x383c0000, 0x383c2000,
0x383c4000, 0x383c6000, 0x383c8000, 0x383ca000, 0x383cc000, 0x383ce000,
0x383d0000, 0x383d2000, 0x383d4000, 0x383d6000, 0x383d8000, 0x383da000,
0x383dc000, 0x383de000, 0x383e0000, 0x383e2000, 0x383e4000, 0x383e6000,
0x383e8000, 0x383ea000, 0x383ec000, 0x383ee000, 0x383f0000, 0x383f2000,
0x383f4000, 0x383f6000, 0x383f8000, 0x383fa000, 0x383fc000, 0x383fe000,
0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840a000,
0x3840c000, 0x3840e000, 0x38410000, 0x38412000, 0x38414000, 0x38416000,
0x38418000, 0x3841a000, 0x3841c000, 0x3841e000, 0x38420000, 0x38422000,
0x38424000, 0x38426000, 0x38428000, 0x3842a000, 0x3842c000, 0x3842e000,
0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843a000,
0x3843c000, 0x3843e000, 0x38440000, 0x38442000, 0x38444000, 0x38446000,
0x38448000, 0x3844a000, 0x3844c000, 0x3844e000, 0x38450000, 0x38452000,
0x38454000, 0x38456000, 0x38458000, 0x3845a000, 0x3845c000, 0x3845e000,
0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846a000,
0x3846c000, 0x3846e000, 0x38470000, 0x38472000, 0x38474000, 0x38476000,
0x38478000, 0x3847a000, 0x3847c000, 0x3847e000, 0x38480000, 0x38482000,
0x38484000, 0x38486000, 0x38488000, 0x3848a000, 0x3848c000, 0x3848e000,
0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849a000,
0x3849c000, 0x3849e000, 0x384a0000, 0x384a2000, 0x384a4000, 0x384a6000,
0x384a8000, 0x384aa000, 0x384ac000, 0x384ae000, 0x384b0000, 0x384b2000,
0x384b4000, 0x384b6000, 0x384b8000, 0x384ba000, 0x384bc000, 0x384be000,
0x384c0000, 0x384c2000, 0x384c4000, 0x384c6000, 0x384c8000, 0x384ca000,
0x384cc000, 0x384ce000, 0x384d0000, 0x384d2000, 0x384d4000, 0x384d6000,
0x384d8000, 0x384da000, 0x384dc000, 0x384de000, 0x384e0000, 0x384e2000,
0x384e4000, 0x384e6000, 0x384e8000, 0x384ea000, 0x384ec000, 0x384ee000,
0x384f0000, 0x384f2000, 0x384f4000, 0x384f6000, 0x384f8000, 0x384fa000,
0x384fc000, 0x384fe000, 0x38500000, 0x38502000, 0x38504000, 0x38506000,
0x38508000, 0x3850a000, 0x3850c000, 0x3850e000, 0x38510000, 0x38512000,
0x38514000, 0x38516000, 0x38518000, 0x3851a000, 0x3851c000, 0x3851e000,
0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852a000,
0x3852c000, 0x3852e000, 0x38530000, 0x38532000, 0x38534000, 0x38536000,
0x38538000, 0x3853a000, 0x3853c000, 0x3853e000, 0x38540000, 0x38542000,
0x38544000, 0x38546000, 0x38548000, 0x3854a000, 0x3854c000, 0x3854e000,
0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855a000,
0x3855c000, 0x3855e000, 0x38560000, 0x38562000, 0x38564000, 0x38566000,
0x38568000, 0x3856a000, 0x3856c000, 0x3856e000, 0x38570000, 0x38572000,
0x38574000, 0x38576000, 0x38578000, 0x3857a000, 0x3857c000, 0x3857e000,
0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858a000,
0x3858c000, 0x3858e000, 0x38590000, 0x38592000, 0x38594000, 0x38596000,
0x38598000, 0x3859a000, 0x3859c000, 0x3859e000, 0x385a0000, 0x385a2000,
0x385a4000, 0x385a6000, 0x385a8000, 0x385aa000, 0x385ac000, 0x385ae000,
0x385b0000, 0x385b2000, 0x385b4000, 0x385b6000, 0x385b8000, 0x385ba000,
0x385bc000, 0x385be000, 0x385c0000, 0x385c2000, 0x385c4000, 0x385c6000,
0x385c8000, 0x385ca000, 0x385cc000, 0x385ce000, 0x385d0000, 0x385d2000,
0x385d4000, 0x385d6000, 0x385d8000, 0x385da000, 0x385dc000, 0x385de000,
0x385e0000, 0x385e2000, 0x385e4000, 0x385e6000, 0x385e8000, 0x385ea000,
0x385ec000, 0x385ee000, 0x385f0000, 0x385f2000, 0x385f4000, 0x385f6000,
0x385f8000, 0x385fa000, 0x385fc000, 0x385fe000, 0x38600000, 0x38602000,
0x38604000, 0x38606000, 0x38608000, 0x3860a000, 0x3860c000, 0x3860e000,
0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861a000,
0x3861c000, 0x3861e000, 0x38620000, 0x38622000, 0x38624000, 0x38626000,
0x38628000, 0x3862a000, 0x3862c000, 0x3862e000, 0x38630000, 0x38632000,
0x38634000, 0x38636000, 0x38638000, 0x3863a000, 0x3863c000, 0x3863e000,
0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864a000,
0x3864c000, 0x3864e000, 0x38650000, 0x38652000, 0x38654000, 0x38656000,
0x38658000, 0x3865a000, 0x3865c000, 0x3865e000, 0x38660000, 0x38662000,
0x38664000, 0x38666000, 0x38668000, 0x3866a000, 0x3866c000, 0x3866e000,
0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867a000,
0x3867c000, 0x3867e000, 0x38680000, 0x38682000, 0x38684000, 0x38686000,
0x38688000, 0x3868a000, 0x3868c000, 0x3868e000, 0x38690000, 0x38692000,
0x38694000, 0x38696000, 0x38698000, 0x3869a000, 0x3869c000, 0x3869e000,
0x386a0000, 0x386a2000, 0x386a4000, 0x386a6000, 0x386a8000, 0x386aa000,
0x386ac000, 0x386ae000, 0x386b0000, 0x386b2000, 0x386b4000, 0x386b6000,
0x386b8000, 0x386ba000, 0x386bc000, 0x386be000, 0x386c0000, 0x386c2000,
0x386c4000, 0x386c6000, 0x386c8000, 0x386ca000, 0x386cc000, 0x386ce000,
0x386d0000, 0x386d2000, 0x386d4000, 0x386d6000, 0x386d8000, 0x386da000,
0x386dc000, 0x386de000, 0x386e0000, 0x386e2000, 0x386e4000, 0x386e6000,
0x386e8000, 0x386ea000, 0x386ec000, 0x386ee000, 0x386f0000, 0x386f2000,
0x386f4000, 0x386f6000, 0x386f8000, 0x386fa000, 0x386fc000, 0x386fe000,
0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870a000,
0x3870c000, 0x3870e000, 0x38710000, 0x38712000, 0x38714000, 0x38716000,
0x38718000, 0x3871a000, 0x3871c000, 0x3871e000, 0x38720000, 0x38722000,
0x38724000, 0x38726000, 0x38728000, 0x3872a000, 0x3872c000, 0x3872e000,
0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873a000,
0x3873c000, 0x3873e000, 0x38740000, 0x38742000, 0x38744000, 0x38746000,
0x38748000, 0x3874a000, 0x3874c000, 0x3874e000, 0x38750000, 0x38752000,
0x38754000, 0x38756000, 0x38758000, 0x3875a000, 0x3875c000, 0x3875e000,
0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876a000,
0x3876c000, 0x3876e000, 0x38770000, 0x38772000, 0x38774000, 0x38776000,
0x38778000, 0x3877a000, 0x3877c000, 0x3877e000, 0x38780000, 0x38782000,
0x38784000, 0x38786000, 0x38788000, 0x3878a000, 0x3878c000, 0x3878e000,
0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879a000,
0x3879c000, 0x3879e000, 0x387a0000, 0x387a2000, 0x387a4000, 0x387a6000,
0x387a8000, 0x387aa000, 0x387ac000, 0x387ae000, 0x387b0000, 0x387b2000,
0x387b4000, 0x387b6000, 0x387b8000, 0x387ba000, 0x387bc000, 0x387be000,
0x387c0000, 0x387c2000, 0x387c4000, 0x387c6000, 0x387c8000, 0x387ca000,
0x387cc000, 0x387ce000, 0x387d0000, 0x387d2000, 0x387d4000, 0x387d6000,
0x387d8000, 0x387da000, 0x387dc000, 0x387de000, 0x387e0000, 0x387e2000,
0x387e4000, 0x387e6000, 0x387e8000, 0x387ea000, 0x387ec000, 0x387ee000,
0x387f0000, 0x387f2000, 0x387f4000, 0x387f6000, 0x387f8000, 0x387fa000,
0x387fc000, 0x387fe000
};
/* Return the size in bytes of one element of the given M_* type code.
 * Unknown codes (including M_VOID) trigger assert(0) and return 0.
 * (The unreachable `break` statements after each `return` in the original
 * were dead code and have been removed.) */
int m_type_sizeof(char type)
{
    switch (type) {
    case M_BYTE:
    case M_UBYTE:
        return sizeof(uint8_t);
    case M_SHORT:
    case M_USHORT:
    case M_HALF: /* half floats are stored as raw 16-bit words */
        return sizeof(uint16_t);
    case M_BOOL:
    case M_INT:
    case M_UINT:
        return sizeof(uint32_t);
    case M_FLOAT:
        return sizeof(float);
    case M_DOUBLE:
        return sizeof(double);
    default:
        assert(0);
        return 0;
    }
}
void m_image_create(struct m_image *image, char type_, int width, int height, int comp)
{
// Init structure image
image->data = 0;
image->size = 0;
image->width = 0;
image->height = 0;
image->type = 0;
image->comp = 0;
int size = width * height * comp;
assert(size > 0);
M_SAFE_FREE(image->data);
// Allocate memory
image->data = malloc(size * m_type_sizeof(type_));
if( !image->data )
printf("BAD ALLOC:m_image_create\n");
image->type = type_;
image->width = width;
image->height = height;
image->comp = comp;
image->size = size;
// Reset all the pixels
memset(image->data, 0, image->size * sizeof(unsigned char));
}
/* Release the pixel buffer of `image` and zero out every field, leaving
 * the struct in the same state as a freshly identity-initialized image. */
void m_image_destroy(struct m_image *image)
{
    free(image->data); /* free(NULL) is a harmless no-op */
    memset(image, 0, sizeof(struct m_image)); /* data ends up NULL again */
}
/* Map a flat index into an interleaved pixel buffer onto its position when
 * the buffer is traversed vertically (column-major).
 *
 * index  : flat element index into the buffer
 * width  : image width in pixels
 * height : image height in pixels
 * depth  : components per pixel, e.g. 3 (RGB) or 4 (RGBA)
 * returns the corresponding vertically-traversed element index.
 *
 * BUG FIX: the pixel number was computed as `index / 4` regardless of the
 * `depth` argument, which is wrong for 3-component (RGB) buffers; it now
 * divides by `depth` (identical results for depth == 4).
 *
 * NOTE(review): x and y are derived with `height` but recombined with
 * `width`, which is only self-consistent for square images — confirm the
 * intended mapping for non-square input.  Defined without `inline` so the
 * function has an external definition (a bare C99 `inline` definition can
 * fail to link when calls are not inlined). */
int vmap_buffer_c(int index, int width, int height, int depth)
{
    int pixel = index / depth;  /* pixel number (was hard-coded / 4) */
    int y = pixel / height;     /* column, per the original layout */
    int x = pixel % height;     /* row, per the original layout */
    int z = index % depth;      /* component within the pixel */
    return (x * width * depth) + (depth * y) + z;
}
/* Average the first three components (e.g. R, G, B) of every pixel of
 * `src` into `dst`, writing the same average into the first three slots of
 * each destination pixel (a per-component grayscale).  Components beyond
 * the third (e.g. alpha) are left untouched in `dst`.
 *
 * NOTE(review): despite its name this function never flipped anything —
 * the original computed an index via vmap_buffer_c() and then discarded
 * it; that dead call has been removed (no behavior change).  Consider
 * renaming, or actually writing through the remapped index if a flip was
 * intended.
 * Assumes src->comp >= 3 and that dst has the same geometry as src —
 * TODO confirm with callers. */
void inline m_flip_buffer(struct m_image *src, struct m_image *dst)
{
    unsigned char *src_p = (unsigned char *)src->data;
    unsigned char *dst_p = (unsigned char *)dst->data;
    int d = src->comp; /* stride: components per pixel */
    int i;
    for (i = 0; i < src->size; i += d) {
        /* truncated mean of the first three channels */
        int avg = (unsigned char)((src_p[i] + src_p[i + 1] + src_p[i + 2]) / 3.0);
        dst_p[i]     = avg;
        dst_p[i + 1] = avg;
        dst_p[i + 2] = avg;
    }
}
/* Walk every byte of `src` and rewrite it with its own value — a
 * deliberate no-op pass over the buffer (presumably a traversal-cost /
 * sanity benchmark — TODO confirm intent).
 * The unused `d = src->comp` local from the original was removed. */
void inline test_array_inplace(struct m_image *src)
{
    unsigned char *src_p = (unsigned char *)src->data;
    int i;
    for (i = 0; i < src->size; i += 1) {
        src_p[i] = src_p[i];
    }
}
/* Split `src` (assumed tightly packed RGB, i.e. comp == 3 — the stride is
 * hard-coded below) into three channel images: `red` keeps R, `green`
 * keeps G, `blue` keeps B, each writing the value 1 into its other two
 * component slots.  Assumes red/green/blue match src's size.
 *
 * The `#pragma omp for` here is *orphaned*: iterations are only
 * distributed when this function is called from inside an active
 * `omp parallel` region; called serially it runs as an ordinary loop.
 * The unused `d = src->comp` local from the original was removed. */
void inline test_rgb_inplace(struct m_image *src, struct m_image *red, struct m_image *green, struct m_image *blue)
{
    unsigned char *src_p = (unsigned char *)src->data;
    unsigned char *red_p = (unsigned char *)red->data;
    unsigned char *green_p = (unsigned char *)green->data;
    unsigned char *blue_p = (unsigned char *)blue->data;
    int i;
    int n = src->size;
    #pragma omp for schedule(static) nowait
    for (i = 0; i < n; i += 3) {
        red_p[i]     = src_p[i];
        red_p[i + 1] = 1;
        red_p[i + 2] = 1;
        green_p[i + 1] = src_p[i + 1];
        green_p[i]     = 1;
        green_p[i + 2] = 1;
        blue_p[i]     = 1;
        blue_p[i + 1] = 1;
        blue_p[i + 2] = src_p[i + 2];
    }
}
/* Stub entry point: this translation unit is a library of image helpers;
 * nothing is exercised here. */
int main(){
return 0;
} |
mandelbrot_area.c | #include<stdio.h>
#include<omp.h>
#define NPOINTS 1000
#define MXITR 1000
void testpoint (void);
struct d_complex {
double r;
double i;
};
struct d_complex c;
int numoutside = 0;
/* Estimate the area of the Mandelbrot set by testing an NPOINTS x NPOINTS
 * grid over [-2, 0.5] x [0, 1.125] (doubled for symmetry about the real
 * axis) and counting, via testpoint(), the points that escape.
 *
 * NOTE(review): the original `#pragma omp parallel for default(shared)
 * private(c, eps)` was removed because it was wrong on three counts:
 *  - private(eps) left each thread's eps UNINITIALIZED inside the region
 *    (firstprivate would be required to copy in the 1.0e-5 value);
 *  - private(c) gave each thread a private copy of the global `c`, but
 *    testpoint() reads the GLOBAL `c`, which was then never assigned;
 *  - numoutside is incremented in testpoint() without synchronization.
 * Running the loop serially produces correct results.  Re-parallelizing
 * correctly requires passing c to testpoint() by value (signature change)
 * plus an atomic/reduction update of numoutside. */
int main(){
    int i, j;
    double area, error, eps = 1.0e-5;
    for (i = 0; i < NPOINTS; i++) {
        for (j = 0; j < NPOINTS; j++) {
            /* sample point, nudged by eps off the axes/edges */
            c.r = -2.0 + 2.5 * (double)(i) / (double)(NPOINTS) + eps;
            c.i = 1.125 * (double)(j) / (double)(NPOINTS) + eps;
            testpoint();
        }
    }
    /* rectangle area (2.5 * 1.125), scaled by the inside fraction and
     * doubled for the conjugate symmetry of the set */
    area = 2.0*2.5*1.125*(double)(NPOINTS*NPOINTS-numoutside)/(double)(NPOINTS*NPOINTS);
    error = area/(double)NPOINTS;
    printf("area = %f\nerror = %f\n", area, error);
    return 0;
}
/* Iterate z = z^2 + c (the Mandelbrot map) for the point in the global
 * `c`.  If the orbit escapes (|z|^2 > 4) within MXITR iterations, count
 * the point as outside the set by incrementing the global `numoutside`.
 *
 * BUG FIX: the increment is now atomic, so concurrent callers no longer
 * lose counts (the global `c` itself must still be managed by the
 * caller — it is inherently racy if written from multiple threads). */
void testpoint(void){
    struct d_complex z;
    int iter;
    double temp;
    z = c;
    for (iter = 0; iter < MXITR; iter++) {
        temp = (z.r*z.r) - (z.i*z.i) + c.r;
        z.i = z.r*z.i*2 + c.i;
        z.r = temp;
        if ((z.r*z.r + z.i*z.i) > 4.0) {
            #pragma omp atomic
            numoutside++;
            break;
        }
    }
}
|
482.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the deterministic pattern A[row][col] = (row + col) / nj. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
    int row, col;
    for (row = 0; row < ni; row++) {
        for (col = 0; col < nj; col++) {
            A[row][col] = ((DATA_TYPE) (row + col) / nj);
        }
    }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr (a newline roughly every 20 values).  Scanning all
 * live-out data here prevents dead-code elimination of the kernel; the
 * output can also be diffed to check correctness. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
    int r, c;
    for (r = 0; r < ni; r++) {
        for (c = 0; c < nj; c++) {
            fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
            if ((r * NJ + c) % 20 == 0) {
                fprintf(stderr, "\n");
            }
        }
    }
    fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 2-D convolution with a fixed 3x3 stencil:
 * B[i][j] = weighted sum of A's 3x3 neighborhood around (i, j).
 * Only interior points are computed; B's border rows/columns are left
 * untouched (callers must not rely on them).
 *
 * BUG FIX: the original pragma carried the unexpanded autotuning
 * placeholder `dist_schedule(static, #p11)`, which is not valid C and
 * does not compile; the clause is dropped, leaving the implementation's
 * default distribution.
 * NOTE(review): launching a `target teams distribute` region per outer
 * row is expensive — consider hoisting the pragma above the i-loop with
 * collapse(2) after verifying data-mapping behavior. */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
    int i, j;
#pragma scop
    for (i = 1; i < _PB_NI - 1; ++i)
    {
        #pragma omp target teams distribute
        for (j = 1; j < _PB_NJ - 1; ++j)
        {
            B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
}
/* Driver: allocate the arrays, initialize A, time the convolution kernel,
 * and print B through polybench's DCE guard so the computation cannot be
 * optimized away.  The polybench macro calls below are order-sensitive. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
 by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
csr_matvec.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlace( HYPRE_Complex    alpha,
                                 hypre_CSRMatrix *A,
                                 hypre_Vector    *x,
                                 HYPRE_Complex    beta,
                                 hypre_Vector    *b,
                                 hypre_Vector    *y,
                                 HYPRE_Int        offset )
{
#ifdef HYPRE_PROFILE
   HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif

   /* Device paths delegate entirely; the CPU path below is compiled
      only when neither CUDA nor device OpenMP is enabled. */
#if defined(HYPRE_USING_CUDA) /* CUDA */
#ifdef HYPRE_BIGINT
   HYPRE_Int ierr = hypre_CSRMatrixMatvecDeviceBIGINT(alpha, A, x, beta, b, y, offset);
#else
   HYPRE_Int ierr = hypre_CSRMatrixMatvecDevice(0, alpha, A, x, beta, b, y, offset);
#endif
#elif defined(HYPRE_USING_DEVICE_OPENMP) /* OMP 4.5 */
   HYPRE_Int ierr = hypre_CSRMatrixMatvecOutOfPlaceOOMP(0, alpha, A, x, beta, b, y, offset);
#else /* CPU */
   /* CSR data of A; the row pointer (and b/y data) are shifted by
      'offset' so only rows [offset, num_rows) are touched. */
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A) + offset;
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A) - offset;
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
   /*HYPRE_Int num_nnz = hypre_CSRMatrixNumNonzeros(A);*/
   /* Optional list of rows that actually contain nonzeros. */
   HYPRE_Int *A_rownnz = hypre_CSRMatrixRownnz(A);
   HYPRE_Int num_rownnz = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *b_data = hypre_VectorData(b) + offset;
   HYPRE_Complex *y_data = hypre_VectorData(y) + offset;
   HYPRE_Int x_size = hypre_VectorSize(x);
   HYPRE_Int b_size = hypre_VectorSize(b) - offset;
   HYPRE_Int y_size = hypre_VectorSize(y) - offset;
   HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
   HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
   HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
   /*HYPRE_Int idxstride_b = hypre_VectorIndexStride(b);
   HYPRE_Int vecstride_b = hypre_VectorVectorStride(b);*/
   HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
   HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
   HYPRE_Complex temp, tempx;
   HYPRE_Int i, j, jj, m, ierr=0;
   /* Heuristic threshold: use the rownnz list only when fewer than
      70% of the rows are nonempty. */
   HYPRE_Real xpar=0.7;
   hypre_Vector *x_tmp = NULL;
   /*---------------------------------------------------------------------
    * Check for size compatibility. Matvec returns ierr = 1 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 2 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in Matvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
   hypre_assert( num_vectors == hypre_VectorNumVectors(b) );
   if (num_cols != x_size)
      ierr = 1;
   if (num_rows != y_size || num_rows != b_size)
      ierr = 2;
   if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
      ierr = 3;
   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
      /* y = beta*b; A and x are never read. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows*num_vectors; i++)
         y_data[i] = beta*b_data[i];
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
      return ierr;
   }
   if (x == y)
   {
      /* Aliased in/out vectors: work from a deep copy of x. */
      x_tmp = hypre_SeqVectorCloneDeep(x);
      x_data = hypre_VectorData(x_tmp);
   }
   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y
    *-----------------------------------------------------------------------*/
   /* The kernels below compute y = (beta/alpha)*b + A*x and scale by
      alpha at the end, so only one multiply by alpha is needed. */
   temp = beta / alpha;
   /* use rownnz pointer to do the A*x multiplication when num_rownnz is smaller than num_rows */
   if (num_rownnz < xpar*(num_rows) || num_vectors > 1)
   {
      /*-----------------------------------------------------------------------
       * y = (beta/alpha)*y
       *-----------------------------------------------------------------------*/
      if (temp != 1.0)
      {
         if (temp == 0.0)
         {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < num_rows*num_vectors; i++)
               y_data[i] = 0.0;
         }
         else
         {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < num_rows*num_vectors; i++)
               y_data[i] = b_data[i]*temp;
         }
      }
      else
      {
         /* NOTE(review): this plain copy loop is the only scaling
            branch without an OpenMP pragma in the original. */
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] = b_data[i];
      }
      /*-----------------------------------------------------------------
       * y += A*x
       *-----------------------------------------------------------------*/
      if (num_rownnz < xpar*(num_rows))
      {
         /* Sparse-row path: iterate only over rows known to have
            nonzeros (A_rownnz). */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rownnz; i++)
         {
            m = A_rownnz[i];
            /*
             * for (jj = A_i[m]; jj < A_i[m+1]; jj++)
             * {
             *         j = A_j[jj];
             *  y_data[m] += A_data[jj] * x_data[j];
             * } */
            if ( num_vectors==1 )
            {
               tempx = 0;
               for (jj = A_i[m]; jj < A_i[m+1]; jj++)
                  tempx += A_data[jj] * x_data[A_j[jj]];
               y_data[m] += tempx;
            }
            else
               /* Multivector: strided access into x and y. */
               for ( j=0; j<num_vectors; ++j )
               {
                  tempx = 0;
                  for (jj = A_i[m]; jj < A_i[m+1]; jj++)
                     tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
                  y_data[ j*vecstride_y + m*idxstride_y] += tempx;
               }
         }
      }
      else // num_vectors > 1
      {
         /* Dense-row multivector path: visit every row. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows; i++)
         {
            for (j = 0; j < num_vectors; ++j)
            {
               tempx = 0;
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
               }
               y_data[ j*vecstride_y + i*idxstride_y ] += tempx;
            }
         }
      }
      /*-----------------------------------------------------------------
       * y = alpha*y
       *-----------------------------------------------------------------*/
      if (alpha != 1.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] *= alpha;
      }
   }
   else
   { // JSP: this is currently the only path optimized
     /* Single-vector fused path: one pass computes
        y = alpha*(A*x + temp*b) with branches specialized for the
        common values of temp (0, +/-1) and alpha (+/-1). */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,tempx)
#endif
      {
         /* Each thread processes its precomputed load-balanced row
            partition [iBegin, iEnd). */
         HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
         HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
         hypre_assert(iBegin <= iEnd);
         hypre_assert(iBegin >= 0 && iBegin <= num_rows);
         hypre_assert(iEnd >= 0 && iEnd <= num_rows);
         if (0 == temp)
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = 0.0;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x
            else if (-1 == alpha)
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = 0.0;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = 0.0;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*A*x
         } // temp == 0
         else if (-1 == temp) // beta == -alpha
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x - y
            else if (-1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x + y
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*(A*x - y)
         } // temp == -1
         else if (1 == temp)
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x + y
            else if (-1 == alpha)
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x - y
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*(A*x + y)
         }
         else
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i]*temp;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x + temp*y
            else if (-1 == alpha)
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i]*temp;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x - temp*y
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i]*temp;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*(A*x + temp*y)
         } // temp != 0 && temp != -1 && temp != 1
      } // omp parallel
   }
   if (x == y)
   {
      hypre_SeqVectorDestroy(x_tmp);
   }
#endif /* CPU */
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
   return ierr;
}
HYPRE_Int
hypre_CSRMatrixMatvec( HYPRE_Complex    alpha,
                       hypre_CSRMatrix *A,
                       hypre_Vector    *x,
                       HYPRE_Complex    beta,
                       hypre_Vector    *y )
{
   /* In-place product y <- alpha*A*x + beta*y: delegate to the
      out-of-place kernel with b aliased to y and a zero row offset. */
   HYPRE_Int ierr;
   ierr = hypre_CSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y, 0);
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* This version is using a different (more efficient) threading scheme
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatvecT( HYPRE_Complex    alpha,
                        hypre_CSRMatrix *A,
                        hypre_Vector    *x,
                        HYPRE_Complex    beta,
                        hypre_Vector    *y )
{
   /* Device paths delegate; the CPU path below uses a per-thread
      scatter buffer followed by a reduction. */
#if defined(HYPRE_USING_CUDA) /* CUDA */
   HYPRE_Int ierr = hypre_CSRMatrixMatvecDevice(1, alpha, A, x, beta, y, y, 0 );
#elif defined(HYPRE_USING_DEVICE_OPENMP) /* OMP 4.5 */
   HYPRE_Int ierr = hypre_CSRMatrixMatvecOutOfPlaceOOMP(1, alpha, A, x, beta, y, y, 0);
#else /* CPU */
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int x_size = hypre_VectorSize(x);
   HYPRE_Int y_size = hypre_VectorSize(y);
   HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
   HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
   HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
   HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
   HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
   HYPRE_Complex temp;
   /* Per-thread copies of y used to avoid concurrent writes in the
      transposed (scatter) product. */
   HYPRE_Complex *y_data_expand;
   HYPRE_Int my_thread_num = 0, offset = 0;
   HYPRE_Int i, j, jv, jj;
   HYPRE_Int num_threads;
   HYPRE_Int ierr = 0;
   hypre_Vector *x_tmp = NULL;
   /*---------------------------------------------------------------------
    * Check for size compatibility. MatvecT returns ierr = 1 if
    * length of X doesn't equal the number of rows of A,
    * ierr = 2 if the length of Y doesn't equal the number of
    * columns of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in MatvecT, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
   if (num_rows != x_size)
      ierr = 1;
   if (num_cols != y_size)
      ierr = 2;
   if (num_rows != x_size && num_cols != y_size)
      ierr = 3;
   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
      /* y = beta*y; A and x are never read. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*num_vectors; i++)
         y_data[i] *= beta;
      return ierr;
   }
   if (x == y)
   {
      /* Aliased in/out vectors: work from a deep copy of x. */
      x_tmp = hypre_SeqVectorCloneDeep(x);
      x_data = hypre_VectorData(x_tmp);
   }
   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*num_vectors; i++)
            y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*num_vectors; i++)
            y_data[i] *= temp;
      }
   }
   /*-----------------------------------------------------------------
    * y += A^T*x
    *-----------------------------------------------------------------*/
   num_threads = hypre_NumThreads();
   if (num_threads > 1)
   {
      /* NOTE(review): this buffer is allocated (and zeroed) even in the
         multivector case below, which never uses it. */
      y_data_expand = hypre_CTAlloc(HYPRE_Complex, num_threads*y_size, HYPRE_MEMORY_HOST);
      if ( num_vectors==1 )
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,j,my_thread_num,offset)
#endif
         {
            /* Each thread scatters into its own slice of
               y_data_expand, then all slices are summed into y. */
            my_thread_num = hypre_GetThreadNum();
            offset = y_size*my_thread_num;
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < num_rows; i++)
            {
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  j = A_j[jj];
                  y_data_expand[offset + j] += A_data[jj] * x_data[i];
               }
            }
            /* implied barrier (for threads)*/
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < y_size; i++)
            {
               for (j = 0; j < num_threads; j++)
               {
                  y_data[i] += y_data_expand[j*y_size + i];
               }
            }
         } /* end parallel threaded region */
      }
      else
      {
         /* multiple vector case is not threaded */
         for (i = 0; i < num_rows; i++)
         {
            for ( jv=0; jv<num_vectors; ++jv )
            {
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  j = A_j[jj];
                  y_data[ j*idxstride_y + jv*vecstride_y ] +=
                     A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
               }
            }
         }
      }
      hypre_TFree(y_data_expand, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* Serial scatter: safe to write y directly. */
      for (i = 0; i < num_rows; i++)
      {
         if ( num_vectors==1 )
         {
            for (jj = A_i[i]; jj < A_i[i+1]; jj++)
            {
               j = A_j[jj];
               y_data[j] += A_data[jj] * x_data[i];
            }
         }
         else
         {
            for ( jv=0; jv<num_vectors; ++jv )
            {
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  j = A_j[jj];
                  y_data[ j*idxstride_y + jv*vecstride_y ] +=
                     A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
               }
            }
         }
      }
   }
   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*num_vectors; i++)
      {
         y_data[i] *= alpha;
      }
   }
   if (x == y) hypre_SeqVectorDestroy(x_tmp);
#endif
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
/* y <- alpha*A*x + beta*y restricted to rows/columns whose CF marker
   equals 'fpt' (F-point / C-point filtered matvec).  Rows with a
   different marker are left untouched, and entries of A whose column
   marker differs from 'fpt' are skipped. */
HYPRE_Int
hypre_CSRMatrixMatvec_FF( HYPRE_Complex    alpha,
                          hypre_CSRMatrix *A,
                          hypre_Vector    *x,
                          HYPRE_Complex    beta,
                          hypre_Vector    *y,
                          HYPRE_Int       *CF_marker_x,
                          HYPRE_Int       *CF_marker_y,
                          HYPRE_Int        fpt )
{
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int x_size = hypre_VectorSize(x);
   HYPRE_Int y_size = hypre_VectorSize(y);
   HYPRE_Complex temp;
   HYPRE_Int i, jj;
   HYPRE_Int ierr = 0;
   /*---------------------------------------------------------------------
    * Check for size compatibility. Matvec returns ierr = 1 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 2 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in Matvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   if (num_cols != x_size)
      ierr = 1;
   if (num_rows != y_size)
      ierr = 2;
   if (num_cols != x_size && num_rows != y_size)
      ierr = 3;
   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
      /* y = beta*y on the marked rows only. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows; i++)
         if (CF_marker_x[i] == fpt) y_data[i] *= beta;
      return ierr;
   }
   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows; i++)
            if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows; i++)
            if (CF_marker_x[i] == fpt) y_data[i] *= temp;
      }
   }
   /*-----------------------------------------------------------------
    * y += A*x
    *-----------------------------------------------------------------*/
   /* BUG FIX: 'temp' is used as a per-row accumulator below and must be
      thread-private; the original pragma listed only (i,jj), leaving
      'temp' shared across threads — a data race that silently corrupts
      y when OpenMP is enabled. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,temp) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      if (CF_marker_x[i] == fpt)
      {
         temp = y_data[i];
         for (jj = A_i[i]; jj < A_i[i+1]; jj++)
            if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
         y_data[i] = temp;
      }
   }
   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows; i++)
         if (CF_marker_x[i] == fpt) y_data[i] *= alpha;
   }
   return ierr;
}
|
task_untied_threadid2.c | // RUN: %libomp-compile-and-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include <string.h>
#include <stdio.h>
/* Checks that for untied tasks under the Argobots-backed runtime:
   (1) after an OpenMP taskyield, the task still runs on the same
       Argobots ULT (vals[i] += 1), and
   (2) an Argobots-level yield does not change the OpenMP thread the
       task is bound to (vals[i] += 2).
   Returns 1 on success (every task scored 3), 0 on failure. */
int test_task_untied_threadid2(int num_threads) {
  int i, vals[NUM_TASKS];
  ABT_thread abt_threads[NUM_TASKS];
  memset(vals, 0, sizeof(vals));
#pragma omp parallel num_threads(num_threads)
  {
    /* Only the master thread creates tasks; 'i' is safe because no
       other thread touches it and each task captures it firstprivate. */
#pragma omp master
    {
      for (i = 0; i < NUM_TASKS; i++) {
#pragma omp task firstprivate(i) untied
        {
          /* Record the Argobots ULT executing this task. */
          ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_threads[i]));
          // Context switching in OpenMP.
#pragma omp taskyield
          int omp_thread_id2 = omp_get_thread_num();
          ABT_thread abt_thread = abt_threads[i];
          ABT_thread abt_thread2;
          ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2));
          ABT_bool abt_thread_equal;
          ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2,
                                            &abt_thread_equal));
          if (abt_thread_equal == ABT_TRUE) {
            /* Same ULT before and after the OpenMP taskyield. */
            vals[i] += 1;
          }
          // Context switching in Argobots.
          ABT_EXIT_IF_FAIL(ABT_thread_yield());
          int omp_thread_id3 = omp_get_thread_num();
          if (omp_thread_id2 == omp_thread_id3) {
            // Argobots context switch does not change the thread-task mapping.
            vals[i] += 2;
          }
        }
      }
    }
  }
  /* Every task must have passed both checks (1 + 2 == 3). */
  for (i = 0; i < NUM_TASKS; i++) {
    if (vals[i] != 3) {
      printf("vals[%d] == %d\n", i, vals[i]);
      return 0;
    }
  }
  return 1;
}
int main() {
  /* Run the test for 1..REPETITIONS threads; count failing runs. */
  int failures = 0;
  int rep = 0;
  while (rep < REPETITIONS) {
    if (!test_task_untied_threadid2(rep + 1)) {
      failures++;
    }
    rep++;
  }
  return failures;
}
|
GB_binop__min_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__min_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__min_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__min_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_fp32)
// A*D function (colscale): GB (_AxD__min_fp32)
// D*A function (rowscale): GB (_DxB__min_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__min_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__min_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_fp32)
// C=scalar+B GB (_bind1st__min_fp32)
// C=scalar+B' GB (_bind1st_tran__min_fp32)
// C=A+scalar GB (_bind2nd__min_fp32)
// C=A'+scalar GB (_bind2nd_tran__min_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = fminf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fminf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_FP32 || GxB_NO_MIN_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
/* C += A+B where C, A, and B are all dense; accumulation uses the
   MIN operator via the GB_BINOP macro defined above. */
void GB (_Cdense_ewise3_accum__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    /* Template expands the triple-dense accumulate loop. */
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where C, A, and B are all dense (no accumulation). */
GrB_Info GB (_Cdense_ewise3_noaccum__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate a sparse matrix B into a dense matrix C, with B
   pre-sliced into B_ntasks tasks for B_nthreads threads. */
GrB_Info GB (_Cdense_accumB__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar b (passed type-erased) into a dense C. */
GrB_Info GB (_Cdense_accumb__min_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    /* NOTE: unreachable duplicate return emitted by the code
       generator; kept as-is because this file is auto-generated. */
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: column scale by the diagonal matrix D; A pre-sliced. */
GrB_Info GB (_AxD__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* The template writes the scaled values directly into C->x. */
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B: row scale by the diagonal matrix D. */
GrB_Info GB (_DxB__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* The template writes the scaled values directly into C->x. */
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with optional mask M
   (structural/complemented) and a precomputed task list. */
GrB_Info GB (_AaddB__min_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* Workspaces for slicing M, A, and B; freed by GB_FREE_WORK. */
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
   sparse or hypersparse. */
GrB_Info GB (_AemultB_08__min_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
   bitmap/full.  'flipxy' is only relevant for non-commutative ops. */
GrB_Info GB (_AemultB_02__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (MIN is commutative, so GB_BINOP_FLIP is 0 for this file.)
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and both A
   and B bitmap/full. */
GrB_Info GB (_AemultB_04__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap. */
GrB_Info GB (_AemultB_bitmap__min_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* Cx = op (x, Bx): bind the scalar x as the first argument and apply
   the MIN operator to every entry present in B. */
GrB_Info GB (_bind1st__min_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    float *Cx = (float *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    float x = (*((float *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        /* Skip entries absent from the bitmap Bb. */
        if (GBB (Bb, k))
        {
            float bval = GBX (Bx, k, false) ;
            Cx [k] = fminf (x, bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* Cx = op (Ax, y): bind the scalar y as the second argument and apply
   the MIN operator to every entry present in A. */
GrB_Info GB (_bind2nd__min_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        /* Skip entries absent from the bitmap Ab. */
        if (GBB (Ab, k))
        {
            float aval = GBX (Ax, k, false) ;
            Cx [k] = fminf (aval, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
/* Redefine the cast-and-apply macro used by GB_unop_transpose.c so
   that each transposed entry computes fminf (x, aij). */
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = fminf (x, aij) ; \
}
/* C = op (x, A'): transpose A and apply MIN with scalar x bound first. */
GrB_Info GB (_bind1st_tran__min_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    /* Restore GB_ATYPE for any code that follows (generated idiom). */
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
/* Redefine the cast-and-apply macro used by GB_unop_transpose.c so
   that each transposed entry computes fminf (aij, y). */
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = fminf (aij, y) ; \
}
/* C = op (A', y): transpose A and apply MIN with scalar y bound second. */
GrB_Info GB (_bind2nd_tran__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
OnchainWithdrawalCircuit.h | #ifndef _ONCHAINWITHDRAWALCIRCUIT_H_
#define _ONCHAINWITHDRAWALCIRCUIT_H_
#include "Circuit.h"
#include "../Utils/Constants.h"
#include "../Utils/Data.h"
#include "../Utils/Utils.h"
#include "../Gadgets/AccountGadgets.h"
#include "ethsnarks.hpp"
#include "utils.hpp"
#include "gadgets/merkle_tree.hpp"
using namespace ethsnarks;
namespace Loopring
{
// Circuit gadget for a single on-chain withdrawal request.
// It caps the requested amount at the user's available balance (in shutdown
// mode the full balance is always withdrawn), rounds the amount through the
// Float24 encoding (with an accuracy constraint), and updates the user's
// balance leaf and account leaf in the accounts Merkle tree.  In shutdown
// mode the account is additionally reset to its genesis state (zero public
// key, zero nonce, empty trading history).
class OnchainWithdrawalGadget : public GadgetT
{
public:
const Constants& constants;
// User state
BalanceGadget balanceBefore;
AccountGadget accountBefore;
// Inputs
DualVariableGadget accountID;
DualVariableGadget tokenID;
DualVariableGadget amountRequested;
// Calculate how much can be withdrawn
MinGadget amountToWithdrawMin;
TernaryGadget amountToWithdraw;
// Float
FloatGadget amountWithdrawn;
RequireAccuracyGadget requireAccuracyAmountWithdrawn;
// Shutdown mode
TernaryGadget amountToSubtract;
TernaryGadget tradingHistoryAfter;
TernaryGadget publicKeyXAfter;
TernaryGadget publicKeyYAfter;
TernaryGadget nonceAfter;
// Calculate the new balance
UnsafeSubGadget balance_after;
// Update User
UpdateBalanceGadget updateBalance_A;
UpdateAccountGadget updateAccount_A;
// accountsMerkleRoot: root before this withdrawal is applied (chained from
// the previous withdrawal in the block).  bShutdownMode: 1 iff the exchange
// is in shutdown mode.
OnchainWithdrawalGadget(
ProtoboardT& pb,
const Constants& _constants,
const VariableT& accountsMerkleRoot,
const VariableT& bShutdownMode,
const std::string& prefix
) :
GadgetT(pb, prefix),
constants(_constants),
// User state
balanceBefore(pb, FMT(prefix, ".balanceBefore")),
accountBefore(pb, FMT(prefix, ".accountBefore")),
// Inputs
accountID(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".accountID")),
tokenID(pb, NUM_BITS_TOKEN, FMT(prefix, ".tokenID")),
amountRequested(pb, NUM_BITS_AMOUNT, FMT(prefix, ".amountRequested")),
// Calculate how much can be withdrawn
// In shutdown mode always withdraw the complete balance
amountToWithdrawMin(pb, amountRequested.packed, balanceBefore.balance, NUM_BITS_AMOUNT, FMT(prefix, ".min(amountRequested, balance)")),
amountToWithdraw(pb, bShutdownMode, balanceBefore.balance, amountToWithdrawMin.result(), FMT(prefix, ".amountToWithdraw")),
// Float
amountWithdrawn(pb, constants, Float24Encoding, FMT(prefix, ".amountWithdrawn")),
requireAccuracyAmountWithdrawn(pb, amountWithdrawn.value(), amountToWithdraw.result(), Float24Accuracy, NUM_BITS_AMOUNT, FMT(prefix, ".requireAccuracyAmountRequested")),
// Shutdown mode - Reset values to genesis state
// In shutdown mode subtract the exact amount so the balance becomes 0
amountToSubtract(pb, bShutdownMode, amountToWithdraw.result(), amountWithdrawn.value(), FMT(prefix, ".amountToSubtract")),
tradingHistoryAfter(pb, bShutdownMode, constants.emptyTradeHistory, balanceBefore.tradingHistory, FMT(prefix, ".tradingHistoryAfter")),
publicKeyXAfter(pb, bShutdownMode, constants.zero, accountBefore.publicKey.x, FMT(prefix, ".publicKeyXAfter")),
publicKeyYAfter(pb, bShutdownMode, constants.zero, accountBefore.publicKey.y, FMT(prefix, ".publicKeyYAfter")),
nonceAfter(pb, bShutdownMode, constants.zero, accountBefore.nonce, FMT(prefix, ".nonceAfter")),
// Calculate the new balance
balance_after(pb, balanceBefore.balance, amountToSubtract.result(), FMT(prefix, ".balance_after")),
// Update User
updateBalance_A(pb, accountBefore.balancesRoot, tokenID.bits,
{balanceBefore.balance, balanceBefore.tradingHistory},
{balance_after.result(), tradingHistoryAfter.result()},
FMT(prefix, ".updateBalance_A")),
updateAccount_A(pb, accountsMerkleRoot, accountID.bits,
{accountBefore.publicKey.x, accountBefore.publicKey.y, accountBefore.nonce, accountBefore.balancesRoot},
{publicKeyXAfter.result(), publicKeyYAfter.result(), nonceAfter.result(), updateBalance_A.result()},
FMT(prefix, ".updateAccount_A"))
{
}
// Fill in the witness for one withdrawal: state before, the inputs, and the
// Merkle proofs for the balance and account updates.
void generate_r1cs_witness(const OnchainWithdrawal& withdrawal)
{
// User state
balanceBefore.generate_r1cs_witness(withdrawal.balanceUpdate.before);
accountBefore.generate_r1cs_witness(withdrawal.accountUpdate.before);
// Inputs
accountID.generate_r1cs_witness(pb, withdrawal.accountUpdate.accountID);
tokenID.generate_r1cs_witness(pb, withdrawal.balanceUpdate.tokenID);
amountRequested.generate_r1cs_witness(pb, withdrawal.amountRequested);
// Withdrawal calculations
amountToWithdrawMin.generate_r1cs_witness();
amountToWithdraw.generate_r1cs_witness();
// Float
amountWithdrawn.generate_r1cs_witness(toFloat(pb.val(amountToWithdraw.result()), Float24Encoding));
requireAccuracyAmountWithdrawn.generate_r1cs_witness();
// Shutdown mode
amountToSubtract.generate_r1cs_witness();
tradingHistoryAfter.generate_r1cs_witness();
publicKeyXAfter.generate_r1cs_witness();
publicKeyYAfter.generate_r1cs_witness();
nonceAfter.generate_r1cs_witness();
// Calculate the new balance
balance_after.generate_r1cs_witness();
// Update User
updateBalance_A.generate_r1cs_witness(withdrawal.balanceUpdate.proof);
updateAccount_A.generate_r1cs_witness(withdrawal.accountUpdate.proof);
}
// Add the constraints of all sub-gadgets (mirrors the witness order above).
void generate_r1cs_constraints()
{
// Inputs
accountID.generate_r1cs_constraints(true);
tokenID.generate_r1cs_constraints(true);
amountRequested.generate_r1cs_constraints(true);
// Withdrawal calculations
amountToWithdrawMin.generate_r1cs_constraints();
amountToWithdraw.generate_r1cs_constraints();
// Float
amountWithdrawn.generate_r1cs_constraints();
requireAccuracyAmountWithdrawn.generate_r1cs_constraints();
// Shutdown mode
amountToSubtract.generate_r1cs_constraints();
tradingHistoryAfter.generate_r1cs_constraints();
publicKeyXAfter.generate_r1cs_constraints();
publicKeyYAfter.generate_r1cs_constraints();
nonceAfter.generate_r1cs_constraints();
// Calculate the new balance
balance_after.generate_r1cs_constraints();
// Update User
updateBalance_A.generate_r1cs_constraints();
updateAccount_A.generate_r1cs_constraints();
}
// Data hashed into the withdrawal-request chain: accountID, padded tokenID,
// and the requested (not the rounded) amount.
const std::vector<VariableArrayT> getOnchainData() const
{
return {accountID.bits,
VariableArrayT(6, constants.zero), tokenID.bits,
amountRequested.bits};
}
// Data published on-chain for the approved withdrawal: padded tokenID,
// accountID, and the Float24-encoded amount actually withdrawn.
const std::vector<VariableArrayT> getApprovedWithdrawalData() const
{
return {VariableArrayT(6, constants.zero), tokenID.bits,
accountID.bits,
amountWithdrawn.bits()};
}
// Accounts Merkle root after this withdrawal has been applied.
const VariableT& getNewAccountsRoot() const
{
return updateAccount_A.result();
}
};
// Circuit for a block of on-chain withdrawals.  Chains numWithdrawals
// OnchainWithdrawalGadgets (each consuming the previous gadget's Merkle
// root), chains a sha256 hash over the withdrawal requests, and exposes the
// public data of the block.  Shutdown mode is signalled by count == 0.
class OnchainWithdrawalCircuit : public Circuit
{
public:
PublicDataGadget publicData;
Constants constants;
// Inputs
DualVariableGadget exchangeID;
DualVariableGadget merkleRootBefore;
DualVariableGadget merkleRootAfter;
DualVariableGadget withdrawalBlockHashStart;
DualVariableGadget startIndex;
DualVariableGadget count;
// Shutdown
EqualGadget bShutdownMode;
// Withdrawals
unsigned int numWithdrawals;
std::vector<OnchainWithdrawalGadget> withdrawals;
std::vector<sha256_many> hashers;
OnchainWithdrawalCircuit(ProtoboardT& pb, const std::string& prefix) :
Circuit(pb, prefix),
publicData(pb, FMT(prefix, ".publicData")),
constants(pb, FMT(prefix, ".constants")),
// Inputs
exchangeID(pb, NUM_BITS_EXCHANGE_ID, FMT(prefix, ".exchangeID")),
merkleRootBefore(pb, 256, FMT(prefix, ".merkleRootBefore")),
merkleRootAfter(pb, 256, FMT(prefix, ".merkleRootAfter")),
withdrawalBlockHashStart(pb, 256, FMT(prefix, ".withdrawalBlockHashStart")),
startIndex(pb, 32, FMT(prefix, ".startIndex")),
count(pb, 32, FMT(prefix, ".count")),
// Shutdown
// shutdown mode is encoded as count == 0
bShutdownMode(pb, count.packed, constants.zero, FMT(prefix, ".bShutdownMode"))
{
}
// Build all constraints for a block of blockSize withdrawals.
void generateConstraints(bool onchainDataAvailability, unsigned int blockSize) override
{
this->numWithdrawals = blockSize;
constants.generate_r1cs_constraints();
// Inputs
exchangeID.generate_r1cs_constraints(true);
merkleRootBefore.generate_r1cs_constraints(true);
merkleRootAfter.generate_r1cs_constraints(true);
withdrawalBlockHashStart.generate_r1cs_constraints(true);
startIndex.generate_r1cs_constraints(true);
count.generate_r1cs_constraints(true);
// Shutdown
bShutdownMode.generate_r1cs_constraints();
// Withdrawals
// reserve() keeps references into the vectors stable while we chain them
withdrawals.reserve(numWithdrawals);
hashers.reserve(numWithdrawals);
for (size_t j = 0; j < numWithdrawals; j++)
{
// chain the Merkle root from the previous withdrawal
VariableT withdrawalAccountsRoot = (j == 0) ? merkleRootBefore.packed : withdrawals.back().getNewAccountsRoot();
withdrawals.emplace_back(
pb,
constants,
withdrawalAccountsRoot,
bShutdownMode.result(),
std::string("withdrawals_") + std::to_string(j)
);
withdrawals.back().generate_r1cs_constraints();
// Hash data from withdrawal request
std::vector<VariableArrayT> withdrawalRequestData = withdrawals.back().getOnchainData();
std::vector<VariableArrayT> hash;
hash.push_back(reverse((j == 0) ? withdrawalBlockHashStart.bits : hashers.back().result().bits));
hash.insert(hash.end(), withdrawalRequestData.begin(), withdrawalRequestData.end());
hashers.emplace_back(pb, flattenReverse(hash), std::string("hash_") + std::to_string(j));
hashers.back().generate_r1cs_constraints();
}
// Public data
publicData.add(exchangeID.bits);
publicData.add(merkleRootBefore.bits);
publicData.add(merkleRootAfter.bits);
publicData.add(reverse(withdrawalBlockHashStart.bits));
publicData.add(reverse(hashers.back().result().bits));
publicData.add(startIndex.bits);
publicData.add(count.bits);
// Store the approved data for all withdrawals
for (auto& withdrawal : withdrawals)
{
publicData.add(withdrawal.getApprovedWithdrawalData());
}
publicData.generate_r1cs_constraints();
// Check the new merkle root
requireEqual(pb, withdrawals.back().getNewAccountsRoot(), merkleRootAfter.packed, "newMerkleRoot");
}
// Fill in the witness for a whole withdrawal block.
bool generateWitness(const OnchainWithdrawalBlock& block)
{
constants.generate_r1cs_witness();
// Inputs
exchangeID.generate_r1cs_witness(pb, block.exchangeID);
merkleRootBefore.generate_r1cs_witness(pb, block.merkleRootBefore);
merkleRootAfter.generate_r1cs_witness(pb, block.merkleRootAfter);
withdrawalBlockHashStart.generate_r1cs_witness(pb, block.startHash);
startIndex.generate_r1cs_witness(pb, block.startIndex);
count.generate_r1cs_witness(pb, block.count);
// printBits("start hash input: 0x", withdrawalBlockHashStart.get_bits(pb), true);
// Shutdown
bShutdownMode.generate_r1cs_witness();
// Withdrawals
assert(withdrawals.size() == hashers.size());
// the withdrawal gadgets are independent and can be filled in parallel
#ifdef MULTICORE
#pragma omp parallel for
#endif
for(unsigned int i = 0; i < block.withdrawals.size(); i++)
{
withdrawals[i].generate_r1cs_witness(block.withdrawals[i]);
}
// Cannot be done in parallel
// (each hasher consumes the previous hasher's output)
for(unsigned int i = 0; i < block.withdrawals.size(); i++)
{
hashers[i].generate_r1cs_witness();
}
// printBits("WithdrawBlockHash: 0x", hashers.back().result().bits.get_bits(pb));
// Public data
publicData.generate_r1cs_witness();
return true;
}
bool generateWitness(const json& input) override
{
return generateWitness(input.get<Loopring::OnchainWithdrawalBlock>());
}
BlockType getBlockType() override
{
return BlockType::OnchainWithdrawal;
}
unsigned int getBlockSize() override
{
return numWithdrawals;
}
void printInfo() override
{
std::cout << pb.num_constraints() << " constraints (" << (pb.num_constraints() / numWithdrawals) << "/onchain withdrawal)" << std::endl;
}
};
}
#endif
|
instrument.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
const unsigned int polybench_papi_eventlist[] = {
#include "papi_counters.list"
0
};
int polybench_papi_eventset;
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 8192
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Return the current wall-clock time in seconds, with microsecond
 * resolution, using gettimeofday.  On failure a diagnostic is printed
 * and whatever is in the timeval is returned. */
static double rtclock()
{
struct timeval tv;
int rc = gettimeofday(&tv, NULL);
if (rc != 0)
printf("Error return from gettimeofday: %d", rc);
return (double) tv.tv_sec + (double) tv.tv_usec * 1.0e-6;
}
/* Flush the data cache by streaming through a large zero-initialized
 * buffer (sized from POLYBENCH_CACHE_SIZE_KB), so timed kernels start
 * from a cold cache.  The sum is asserted only to keep the compiler from
 * eliminating the reads. */
void polybench_flush_cache()
{
int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
double* flush = (double*) calloc(cs, sizeof(double));
int i;
double tmp = 0.0;
/* fail loudly instead of dereferencing NULL on allocation failure */
assert (flush != NULL);
/* reduction avoids a data race on tmp when OpenMP is enabled */
#pragma omp parallel for reduction(+:tmp)
for (i = 0; i < cs; i++)
tmp += flush[i];
assert (tmp <= 10.0);
/* the original leaked this buffer on every call */
free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
/* Switch the calling process to the real-time FIFO scheduling class at
 * maximum priority, to reduce OS noise during measurement. */
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
as root, and this works only for Linux kernels. */
/* NOTE(review): the return value of sched_setscheduler is ignored, so
failure (e.g. not running as root) is silent -- confirm intended. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max(SCHED_FIFO);
sched_setscheduler(0, SCHED_FIFO, &schedParam);
}
/* Restore the default (SCHED_OTHER) scheduling policy after measurement. */
void polybench_linux_standard_scheduler()
{
/* Restore to standard scheduler policy. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max(SCHED_OTHER);
sched_setscheduler(0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
/* Print a diagnostic for a failed PAPI call (file/line/call/return code),
 * shut PAPI down if it was initialized, and exit the program. */
void test_fail(char *file, int line, char *call, int retval)
{
char buf[128];
memset(buf, '\0', sizeof(buf));
if (retval != 0)
fprintf(stdout,"%-40s FAILED\nLine # %d\n", file, line);
else {
fprintf(stdout,"%-40s SKIPPED\n", file);
fprintf(stdout,"Line # %d\n", line);
}
if (retval == PAPI_ESYS) {
sprintf(buf, "System error in %s", call);
perror(buf);
} else if (retval > 0) {
fprintf(stdout,"Error: %s\n", call);
} else if (retval == 0) {
fprintf(stdout,"Error: %s\n", call);
} else {
char errstring[PAPI_MAX_STR_LEN];
PAPI_perror(retval, errstring, PAPI_MAX_STR_LEN);
fprintf(stdout,"Error in %s: %s\n", call, errstring);
}
fprintf(stdout,"\n");
if ( PAPI_is_initialized() ) PAPI_shutdown();
exit(1);
}
/* Initialize the PAPI library and create the (initially empty) event set
 * used by the start/stop counter functions. */
void polybench_papi_init()
{
int retval;
polybench_papi_eventset = PAPI_NULL;
if ((retval = PAPI_library_init(PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
test_fail(__FILE__, __LINE__, "PAPI_library_init", retval);
if ((retval = PAPI_create_eventset(&polybench_papi_eventset)) != PAPI_OK)
test_fail(__FILE__, __LINE__, "PAPI_create_eventset", retval);
}
/* Destroy the event set and shut PAPI down. */
void polybench_papi_close()
{
int retval;
if ((retval = PAPI_destroy_eventset(&polybench_papi_eventset)) != PAPI_OK)
test_fail(__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
if (PAPI_is_initialized())
PAPI_shutdown();
}
/* Start counting event #evid from polybench_papi_eventlist.
 * Returns 1 (and counts nothing) if the event cannot be added,
 * 0 on success.  Flushes the cache first unless disabled. */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
# endif
int retval = 1;
char descr[PAPI_MAX_STR_LEN];
PAPI_event_info_t evinfo;
PAPI_event_code_to_name(polybench_papi_eventlist[evid], descr);
if (PAPI_add_event(polybench_papi_eventset,
polybench_papi_eventlist[evid]) != PAPI_OK)
return 1;
if (PAPI_get_event_info(polybench_papi_eventlist[evid], &evinfo) != PAPI_OK)
test_fail(__FILE__, __LINE__, "PAPI_get_event_info", retval);
if ((retval = PAPI_start(polybench_papi_eventset)) != PAPI_OK)
test_fail(__FILE__, __LINE__, "PAPI_start", retval);
return 0;
}
/* Stop counting event #evid, record its value in polybench_papi_values,
 * and remove it from the event set so the next event can be added. */
void polybench_papi_stop_counter(int evid)
{
int retval;
long_long values[1];
values[0] = 0;
if ((retval = PAPI_read(polybench_papi_eventset, &values[0])) != PAPI_OK)
test_fail(__FILE__, __LINE__, "PAPI_read", retval);
if ((retval = PAPI_stop(polybench_papi_eventset, NULL)) != PAPI_OK)
test_fail(__FILE__, __LINE__, "PAPI_stop", retval);
polybench_papi_values[evid] = values[0];
if ((retval = PAPI_remove_event(polybench_papi_eventset,
polybench_papi_eventlist[evid])) != PAPI_OK)
test_fail(__FILE__, __LINE__, "PAPI_remove_event", retval);
}
/* Print all recorded counter values on one line.
 * NOTE(review): values are long_long (signed) but printed with %llu --
 * confirm the intended format specifier. */
void polybench_papi_print()
{
int evid;
for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
printf ("%llu ", polybench_papi_values[evid]);
printf ("\n");
}
#endif /* POLYBENCH_PAPI */
/* Prepare the machine for measurement: flush the cache (unless disabled)
 * and optionally switch to the FIFO real-time scheduler. */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_fifo_scheduler();
#endif
}
/* Start the wall-clock timer (after preparing instruments, so cache
 * flushing is not included in the measured interval). */
void polybench_timer_start()
{
polybench_prepare_instruments();
polybench_t_start = rtclock();
}
/* Stop the wall-clock timer and restore the standard scheduler if the
 * FIFO scheduler was enabled. */
void polybench_timer_stop()
{
polybench_t_end = rtclock();
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_standard_scheduler();
#endif
}
/* Print the elapsed time, in seconds, of the last start/stop interval. */
void polybench_timer_print()
{
double elapsed = polybench_t_end - polybench_t_start;
printf ("%0.6lf\n", elapsed);
}
|
packet-inl.h | /*!
* Copyright (c) 2014 by Contributors
* \file packet-inl.h
* \brief Generic packet vectorization code
*/
#ifndef MSHADOW_PACKET_INL_H_
#define MSHADOW_PACKET_INL_H_
#if defined(__APPLE__) || defined(__FreeBSD__)
#include <stdlib.h>
#else
#include <malloc.h>
#endif
#include "./base.h"
#include "./tensor.h"
#include "./expression.h"
namespace mshadow {
/*! \brief namespace of packet math*/
namespace packet {
// Supported packet (SIMD) architectures.
enum PacketArch {
kPlain,
kSSE2,
};
// default architecture: SSE2 when compiled with MSHADOW_USE_SSE, else plain
#if MSHADOW_USE_SSE
#define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kSSE2
#else
#define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kPlain
#endif
// whether packet operator is enabled.
/*!
 * \brief Generic packet type
 * \tparam DType The data type of the packet.
 * \tparam Arch the Arch of the packet.
 */
template<typename DType, PacketArch Arch = MSHADOW_DEFAULT_PACKET>
struct Packet;
// Alignment requirement per architecture, expressed as log2 of the byte
// boundary (value = 4 means 1 << 4 = 16-byte alignment; see AlignedMallocPitch).
template<PacketArch Arch>
struct AlignBytes {
static const index_t value = 4;
};
} // namespace packet
} // namespace mshadow
namespace mshadow {
namespace packet {
/*!
 * \brief analog to cudaMallocPitch: allocate an aligned space with
 *        num_line * lspace cells
 * \param out_pitch output parameter, the actual space allocated for each line
 * \param lspace number of cells required for each line
 * \param num_line number of lines to be allocated
 * \return pointer to the allocated memory, aligned to the default packet
 *         alignment; the process is aborted (CHECK/LOG(FATAL)) on failure
 */
inline void* AlignedMallocPitch(size_t *out_pitch,
size_t lspace,
size_t num_line) {
const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value;
const index_t mask = (1 << bits) - 1;
// round each line up to a multiple of the (1 << bits)-byte boundary
size_t pitch = ((lspace + mask) >> bits) << bits;
*out_pitch = pitch;
#ifdef _MSC_VER
void *res = _aligned_malloc(pitch * num_line, 1 << bits);
#else
void *res;
int ret = posix_memalign(&res, 1 << bits, pitch * num_line);
CHECK_EQ(ret, 0) << "AlignedMallocPitch failed";
#endif
if (res == NULL) {
LOG(FATAL) << "AlignedMallocPitch failed";
}
// GCC 6+ can emit a spurious -Wmaybe-uninitialized for res here; suppress
// it locally with a balanced, guarded push/pop.  (The original code issued
// "ignored" without a push -- leaking the suppression into the rest of the
// translation unit -- and an unconditional pop that was unmatched on
// non-GCC compilers.)
#if __GNUC__ >= 6
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
return res;
#if __GNUC__ >= 6
#pragma GCC diagnostic pop
#endif
}
/*!
 * \brief free aligned space allocated by AlignedMallocPitch
 * \param ptr pointer to space to be freed (may be NULL)
 */
inline void AlignedFree(void *ptr) {
#ifdef _MSC_VER
// memory from _aligned_malloc must be released with _aligned_free
_aligned_free(ptr);
#else
// posix_memalign memory is released with plain free
free(ptr);
#endif
}
/*! \brief check if a size/pitch (in bytes) is a multiple of the Arch alignment */
template<PacketArch Arch>
inline bool CheckAlign(size_t pitch) {
const index_t bits = AlignBytes<Arch>::value;
return !(pitch & ((1 << bits) - 1));
}
/*! \brief check if a pointer is aligned */
template<PacketArch Arch>
inline bool CheckAlign(void *ptr) {
return CheckAlign<Arch>(reinterpret_cast<size_t>(ptr));
}
/*!
 * \brief get upper bound of aligned index of size
 * \param size size of the array (in elements)
 * \tparam DType element type; its sizeof converts elements to bytes
 * \tparam Arch packet architecture whose alignment is used
 */
template<typename DType, PacketArch Arch>
inline index_t UpperAlign(index_t size) {
// use the alignment of the requested Arch; the original ignored the Arch
// template parameter and always used MSHADOW_DEFAULT_PACKET, which is
// inconsistent with CheckAlign<Arch>
const index_t bits = AlignBytes<Arch>::value;
const index_t mask = (1 << bits) - 1;
const index_t fsize = sizeof(DType);
return (((size * fsize + mask) >> bits) << bits) / fsize;
}
/*!
 * \brief get lower bound of aligned index of size
 * \param size size of the array (in elements)
 * \tparam DType element type; its sizeof converts elements to bytes
 * \tparam Arch packet architecture whose alignment is used
 */
template<typename DType, PacketArch Arch>
inline index_t LowerAlign(index_t size) {
// use the alignment of the requested Arch; the original ignored the Arch
// template parameter and always used MSHADOW_DEFAULT_PACKET, which is
// inconsistent with CheckAlign<Arch>
const index_t bits = AlignBytes<Arch>::value;
const index_t fsize = sizeof(DType);
return (((size * fsize) >> bits) << bits) / fsize;
}
/*!
 * \brief generic Packet operator
 * \tparam OP The operator
 * \tparam DType The data type
 * \tparam Arch The architecture.
 *
 * The generic template is disabled (kEnabled = false); only the
 * specializations below provide a packet implementation.
 */
template<typename OP, typename DType, PacketArch Arch>
struct PacketOp {
static const bool kEnabled = false;
};
// specialization of operators
// packet-wise addition
template<typename DType, PacketArch Arch>
struct PacketOp<op::plus, DType, Arch> {
static const bool kEnabled = true;
MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs,
const Packet<DType, Arch>& rhs) {
return lhs + rhs;
}
};
// packet-wise subtraction
template<typename DType, PacketArch Arch>
struct PacketOp<op::minus, DType, Arch> {
static const bool kEnabled = true;
MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs,
const Packet<DType, Arch>& rhs) {
return lhs - rhs;
}
};
// packet-wise multiplication
template<typename DType, PacketArch Arch>
struct PacketOp<op::mul, DType, Arch> {
static const bool kEnabled = true;
MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs,
const Packet<DType, Arch>& rhs) {
return lhs * rhs;
}
};
// packet-wise division
template<typename DType, PacketArch Arch>
struct PacketOp<op::div, DType, Arch> {
static const bool kEnabled = true;
MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs,
const Packet<DType, Arch>& rhs) {
return lhs / rhs;
}
};
// identity: pass the packet through unchanged
template<typename DType, PacketArch Arch>
struct PacketOp<op::identity, DType, Arch> {
static const bool kEnabled = true;
MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& src) {
return src;
}
};
// savers to do storage
// Generic saver: read-modify-write, combining the existing destination
// value with src using the save-method's operator (e.g. +=, *=).
template<typename SV, typename TFloat, PacketArch Arch>
struct Saver{
MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) {
Packet<TFloat, Arch> lhs = Packet<TFloat, Arch>::Load(dst);
Packet<TFloat, Arch> ans = PacketOp<typename SV::OPType, TFloat, Arch>::Map(lhs, src);
ans.Store(dst);
}
};
// Specialization for plain assignment (=): store directly, no load needed.
template<typename TFloat, PacketArch Arch>
struct Saver<sv::saveto, TFloat, Arch> {
MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) {
src.Store(dst);
}
};
} // namespace packet
} // namespace mshadow
#include "packet/plain-inl.h"
#if MSHADOW_USE_SSE && !defined(__CUDACC__)
#include "packet/sse-inl.h"
#endif
namespace mshadow {
namespace expr {
typedef packet::PacketArch PacketArch;
// same as plan, but use packet
// A PacketPlan mirrors an expression tree and evaluates it either one packet
// at a time (EvalPacket) or one scalar at a time (Eval, for unaligned tails).
template<typename ExpType, typename DType, PacketArch Arch>
class PacketPlan {
public:
/*!
 * \brief evaluate the expression at index [y][x],
 * x will be aligned to Packet<DType, Arch>::Size()
 */
MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const;
MSHADOW_CINLINE DType Eval(index_t y, index_t x) const;
};
// Plan over a tensor: loads packets / scalars straight from its memory.
template <typename Device, int dim, typename DType, PacketArch Arch>
class PacketPlan<Tensor<Device, dim, DType>, DType, Arch> {
public:
explicit PacketPlan(const Tensor<Device, dim, DType> &t)
:dptr_(t.dptr_), stride_(t.stride_) {}
MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const {
return packet::Packet<DType, Arch>::Load(&dptr_[y * stride_ + x]);
}
MSHADOW_CINLINE DType Eval(index_t y, index_t x) const {
return dptr_[y * stride_ + x];
}
private:
const DType *dptr_;
index_t stride_;
};
// Plan over a scalar: broadcasts the same value to every packet lane.
template<typename DType, PacketArch Arch>
class PacketPlan<ScalarExp<DType>, DType, Arch> {
public:
explicit PacketPlan(DType scalar) : scalar_(scalar) {}
MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const {
return packet::Packet<DType, Arch>::Fill(scalar_);
}
MSHADOW_CINLINE DType Eval(index_t y, index_t x) const {
return scalar_;
}
private:
DType scalar_;
};
// Plan over a binary map: applies OP packet-wise / scalar-wise to both sides.
template<typename OP, typename TA, typename TB, int etype, typename DType, PacketArch Arch>
class PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> {
public:
PacketPlan(const PacketPlan<TA, DType, Arch> &lhs, const PacketPlan<TB, DType, Arch> &rhs)
: lhs_(lhs), rhs_(rhs) {}
MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const {
return packet::PacketOp<OP, DType, Arch>::Map(lhs_.EvalPacket(y, x), rhs_.EvalPacket(y, x));
}
MSHADOW_CINLINE DType Eval(index_t y, index_t x) const {
return OP::Map(lhs_.Eval(y, x), rhs_.Eval(y, x));
}
private:
PacketPlan<TA, DType, Arch> lhs_;
PacketPlan<TB, DType, Arch> rhs_;
};
// Plan over a unary map: applies OP packet-wise / scalar-wise to its operand.
template<typename OP, typename TA, int etype, typename DType, PacketArch Arch>
class PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch> {
public:
PacketPlan(const PacketPlan<TA, DType, Arch> &src) : src_(src) {}
// The return type must carry Arch: the original declared
// packet::Packet<DType>, which silently fell back to the default
// MSHADOW_DEFAULT_PACKET argument and mismatches the
// PacketOp<OP, DType, Arch>::Map result for any non-default Arch.
MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const {
return packet::PacketOp<OP, DType, Arch>::Map(src_.EvalPacket(y, x));
}
// scalar fallback used for the unaligned tail of each row
MSHADOW_CINLINE DType Eval(index_t y, index_t x) const {
return OP::Map(src_.Eval(y, x));
}
private:
PacketPlan<TA, DType, Arch> src_;
};
// Factory functions that build a PacketPlan mirroring an expression tree.
// forward declaration for the mutually recursive binary case
template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype>
inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch>
MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e);
// scalar leaf
template<PacketArch Arch, typename DType>
inline PacketPlan<ScalarExp<DType>, DType, Arch> MakePacketPlan(const ScalarExp<DType> &e) {
return PacketPlan<ScalarExp<DType>, DType, Arch>(e.scalar_);
}
// rvalue (tensor) leaf
template<PacketArch Arch, typename T, typename DType>
inline PacketPlan<T, DType, Arch> MakePacketPlan(const RValueExp<T, DType> &e) {
return PacketPlan<T, DType, Arch>(e.self());
}
// tensor-maker expression leaf
template<PacketArch Arch, typename T, int dim, typename DType>
inline PacketPlan<T, DType, Arch>
MakePacketPlan(const MakeTensorExp<T, cpu, dim, DType> &e) {
return PacketPlan<T, DType, Arch>(e.real_self());
}
// unary node: recurse into the operand
template<PacketArch Arch, typename OP, typename TA, typename DType, int etype>
inline PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch>
MakePacketPlan(const UnaryMapExp<OP, TA, DType, etype> &e) {
return PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch>(MakePacketPlan<Arch>(e.src_));
}
// binary node: recurse into both operands
template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype>
inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch>
MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e) {
return PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>,
DType, Arch>(MakePacketPlan<Arch>(e.lhs_), MakePacketPlan<Arch>(e.rhs_));
}
/*!
 * \brief static check packet enable
 *
 * \tparam Device the type of Device
 * \tparam dim dimension of the tensor
 * \tparam E expression
 *
 * kPass is true only when every node of the expression tree supports
 * packet evaluation on Arch; otherwise the scalar path must be used.
 */
template<typename E, PacketArch Arch>
struct PacketCheck{
static const bool kPass = false;
};
// floating-point element types are packetizable
template<PacketArch Arch>
struct PacketCheck<float, Arch> {
static const bool kPass = true;
};
template<PacketArch Arch>
struct PacketCheck<double, Arch> {
static const bool kPass = true;
};
// a scalar leaf passes iff its element type passes
template<typename DType, PacketArch Arch>
struct PacketCheck<ScalarExp<DType>, Arch> {
static const bool kPass = PacketCheck<DType, Arch>::kPass;
};
// a cpu tensor leaf passes iff its element type passes
template<int dim, typename DType, PacketArch Arch>
struct PacketCheck<Tensor<cpu, dim, DType>, Arch> {
static const bool kPass = PacketCheck<DType, Arch>::kPass;
};
// a unary node passes iff its operand passes and OP has a packet form
template<typename OP, typename TA, typename DType, int etype, PacketArch Arch>
struct PacketCheck<UnaryMapExp<OP, TA, DType, etype>, Arch> {
static const bool kPass = PacketCheck<TA, Arch>::kPass &&
packet::PacketOp<OP, DType, Arch>::kEnabled;
};
// a binary node passes iff both operands pass and OP has a packet form
template<typename OP, typename TA, typename TB, typename DType, int etype, PacketArch Arch>
struct PacketCheck< BinaryMapExp<OP, TA, TB, DType, etype>, Arch> {
static const bool kPass = packet::PacketOp<OP, DType, Arch>::kEnabled &&
PacketCheck<TA, Arch>::kPass && PacketCheck<TB, Arch>::kPass;
};
//----------------------------------------------------
// Check if data is aligned and allow packet operation
//----------------------------------------------------
// Runtime counterpart of PacketCheck: verifies that every tensor in the
// expression has a properly aligned data pointer and row stride.
template<int dim, typename E, PacketArch Arch>
struct PacketAlignCheck {
inline static bool Check(const E &exp) {
// conservative default: unknown expressions are treated as unaligned
return false;
}
};
// scalars have no memory to align
template<int dim, typename DType, PacketArch Arch>
struct PacketAlignCheck<dim, ScalarExp<DType>, Arch> {
inline static bool Check(const ScalarExp<DType> &exp) {
return true;
}
};
// a tensor is usable iff both its base pointer and its stride (in bytes)
// are aligned for Arch
template<int dim, typename DType, PacketArch Arch>
struct PacketAlignCheck<dim, Tensor<cpu, dim, DType>, Arch> {
inline static bool Check(const Tensor<cpu, dim, DType> &t) {
return packet::CheckAlign<Arch>(t.dptr_) &&
packet::CheckAlign<Arch>(t.stride_ * sizeof(DType));
}
};
// recurse through unary and binary nodes
template<int dim, typename OP, typename TA, typename DType, int etype, PacketArch Arch>
struct PacketAlignCheck<dim, UnaryMapExp<OP, TA, DType, etype>, Arch> {
inline static bool Check(const UnaryMapExp<OP, TA, DType, etype> &t) {
return PacketAlignCheck<dim, TA, Arch>::Check(t.src_);
}
};
template<int dim, typename OP, typename TA, typename TB,
typename DType, int etype, PacketArch Arch>
struct PacketAlignCheck<dim, BinaryMapExp<OP, TA, TB, DType, etype>, Arch> {
inline static bool Check(const BinaryMapExp<OP, TA, TB, DType, etype> &t) {
return PacketAlignCheck<dim, TA, Arch>::Check(t.lhs_) &&
PacketAlignCheck<dim, TB, Arch>::Check(t.rhs_);
}
};
/*!
 * \brief use PacketPlan to compute result
 *
 * Evaluates the plan into dst row by row: the aligned prefix of each row is
 * processed one packet at a time, the remaining tail one scalar at a time.
 * Rows are distributed across OpenMP threads when available.
 */
template<typename SV, typename E, int dim, typename DType, PacketArch Arch>
inline void MapPacketPlan(Tensor<cpu, dim, DType> _dst,
const expr::PacketPlan<E, DType, Arch>& plan) {
Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
// xlen: largest multiple of the packet size not exceeding the row length
const index_t xlen = packet::LowerAlign<DType, Arch>(dst.size(1));
const size_t packetSize = packet::Packet<DType, Arch>::size;
#ifndef __CUDACC__
#pragma omp parallel for
#endif
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
for (index_t x = 0; x < xlen; x += packetSize) {
packet::Saver<SV, DType, Arch>::Save(&dst[y][x], plan.EvalPacket(y, x));
}
// scalar fallback for the unaligned tail of the row
for (index_t x = xlen; x < dst.size(1); ++x) {
SV::Save(dst[y][x], plan.Eval(y, x));
}
}
}
} // namespace expr
} // namespace mshadow
#endif // MSHADOW_PACKET_INL_H_
|
mttkrp.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include "hicoo.h"
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
int spt_MTTKRPHiCOO_3D(
sptSparseTensorHiCOO const * const hitsr,
sptMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode);
int spt_MTTKRPHiCOO_3D_MatrixTiling(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode);
/*************************************************
* PUBLIC FUNCTIONS
*************************************************/
/**
* Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode
* @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R
* @param[in] hitsr the HiCOO sparse tensor input
* @param[in] mats (nmodes+1) dense matrices, with mats[nmodes] as temporary
* @param[in] mats_order the order of the Khatri-Rao products
* @param[in] mode the mode on which the MTTKRP is performed
*
* This function supports arbitrary-order sparse tensors with Khatri-Rao
* products of dense factor matrices; the output is the updated dense matrix for the given "mode".
*/
int sptMTTKRPHiCOO(
sptSparseTensorHiCOO const * const hitsr,
sptMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode)
{
sptIndex const nmodes = hitsr->nmodes;
if(nmodes == 3) {
sptAssert(spt_MTTKRPHiCOO_3D(hitsr, mats, mats_order, mode) == 0);
return 0;
}
sptIndex const * const ndims = hitsr->ndims;
sptValue const * const restrict vals = hitsr->values.data;
sptIndex const stride = mats[0]->stride;
sptValueVector scratch; // Temporary array
/* Check the mats. */
for(sptIndex i=0; i<nmodes; ++i) {
if(mats[i]->ncols != mats[nmodes]->ncols) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
}
if(mats[i]->nrows != ndims[i]) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
}
}
sptIndex const tmpI = mats[mode]->nrows;
sptIndex const R = mats[mode]->ncols;
sptMatrix * const restrict M = mats[nmodes];
sptValue * const restrict mvals = M->values;
memset(mvals, 0, tmpI*stride*sizeof(*mvals));
sptNewValueVector(&scratch, R, R);
sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord));
sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord));
/* Loop kernels */
for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
sptNnzIndex kptr_begin = hitsr->kptr.data[k];
sptNnzIndex kptr_end = hitsr->kptr.data[k+1];
/* Loop blocks in a kernel */
for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
/* Block indices */
for(sptIndex m=0; m<nmodes; ++m)
block_coord[m] = hitsr->binds[m].data[b];
sptNnzIndex bptr_begin = hitsr->bptr.data[b];
sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
/* Loop entries in a block */
for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
/* Element indices */
for(sptIndex m=0; m<nmodes; ++m)
ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z];
/* Multiply the 1st matrix */
sptIndex times_mat_index = mats_order[1];
sptMatrix * times_mat = mats[times_mat_index];
sptIndex tmp_i = ele_coord[times_mat_index];
sptValue const entry = vals[z];
for(sptIndex r=0; r<R; ++r) {
scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
}
/* Multiply the rest matrices */
for(sptIndex m=2; m<nmodes; ++m) {
times_mat_index = mats_order[m];
times_mat = mats[times_mat_index];
tmp_i = ele_coord[times_mat_index];
for(sptIndex r=0; r<R; ++r) {
scratch.data[r] *= times_mat->values[tmp_i * stride + r];
}
}
sptIndex const mode_i = ele_coord[mode];
for(sptIndex r=0; r<R; ++r) {
mvals[mode_i * stride + r] += scratch.data[r];
}
} // End loop entries
} // End loop blocks
} // End loop kernels
free(block_coord);
free(ele_coord);
sptFreeValueVector(&scratch);
return 0;
}
/**
 * Matricized sparse tensor in HiCOO format times a sequence of dense matrix
 * Khatri-Rao products (MTTKRP) on a specified mode.  The tensor rank and the
 * columns of the dense matrices are stored in fewer bits (sptElementIndex).
 * @param[out] mats[nmodes]  the result of MTTKRP, a dense matrix, of size ndims[mode] * R
 * @param[in] hitsr       the HiCOO sparse tensor input
 * @param[in] mats        (nmodes+1) dense matrices, with mats[nmodes] as temporary
 * @param[in] mats_order  the order of the Khatri-Rao products
 * @param[in] mode        the mode on which the MTTKRP is performed
 * @return 0 on success, non-zero on error
 *
 * This function supports arbitrary-order sparse tensors with Khatri-Rao
 * products of dense factor matrices; the output is the updated dense matrix
 * for the given "mode".
 */
int sptMTTKRPHiCOO_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],         // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = hitsr->nmodes;

    /* Third-order tensors use a specialized, unrolled kernel. */
    if(nmodes == 3) {
        sptAssert(spt_MTTKRPHiCOO_3D_MatrixTiling(hitsr, mats, mats_order, mode) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;
    sptValueVector scratch;  // Temporary accumulator holding one length-R row product

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    sptNewValueVector(&scratch, R, R);

    /* Per-mode base pointers to the current block's rows in each factor matrix. */
    sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
    /* BUG FIX: the original never checked this allocation before use. */
    if(blocked_times_mat == NULL) {
        sptFreeValueVector(&scratch);
        return -1;
    }

    /* Loop kernels.
     * BUG FIX: kernel/block/entry counters use sptNnzIndex because the
     * kptr/bptr arrays store sptNnzIndex offsets; the original sptIndex
     * counters could truncate on very large tensors. */
    for(sptNnzIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];
        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Base pointers of this block inside every factor matrix and the output. */
            for(sptIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
                #pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    #pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                /* Accumulate into the output row for "mode". */
                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                #pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    free(blocked_times_mat);
    sptFreeValueVector(&scratch);
    return 0;
}
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
/* MTTKRP specialized for third-order HiCOO tensors: the two Khatri-Rao
 * factor matrices are unrolled instead of looping over modes.
 * Returns 0 on success (errors reported via spt_CheckError). */
int spt_MTTKRPHiCOO_3D(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],             // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* The two factor matrices multiplied against the tensor entries. */
    sptIndex times_mat_index_1 = mats_order[1];
    sptMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    sptElementIndex mode_i;
    sptElementIndex tmp_i_1, tmp_i_2;
    sptValue entry;
    sptValue * restrict blocked_mvals;
    sptValue * restrict blocked_times_mat_1;
    sptValue * restrict blocked_times_mat_2;

    /* Loop kernels.
     * BUG FIX: kernel/block/entry counters use sptNnzIndex because the
     * kptr/bptr arrays store sptNnzIndex offsets; the original sptIndex
     * counters could truncate on very large tensors. */
    for(sptNnzIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];
        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Base pointers of this block inside the output and both factors. */
            blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                mode_i = hitsr->einds[mode].data[z];
                tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                entry = vals[z];

                /* out[mode_i,:] += entry * mat1[i1,:] .* mat2[i2,:] */
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                sptValue * const restrict blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride;
                sptValue * const restrict blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride;
                for(sptIndex r=0; r<R; ++r) {
                    bmvals_row[r] += entry *
                        blocked_times_mat_1_row[r]
                        * blocked_times_mat_2_row[r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}
/* MTTKRP specialized for third-order HiCOO tensors with rank-matrix tiling
 * (column counts stored in sptElementIndex).  The two Khatri-Rao factor
 * matrices are unrolled instead of looping over modes.
 * Returns 0 on success (errors reported via spt_CheckError). */
int spt_MTTKRPHiCOO_3D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],         // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* The two factor matrices multiplied against the tensor entries. */
    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    sptElementIndex mode_i;
    sptElementIndex tmp_i_1, tmp_i_2;
    sptValue entry;
    sptValue * restrict blocked_mvals;
    sptValue * restrict blocked_times_mat_1;
    sptValue * restrict blocked_times_mat_2;

    /* Loop kernels.
     * BUG FIX: kernel/block/entry counters use sptNnzIndex because the
     * kptr/bptr arrays store sptNnzIndex offsets; the original sptIndex
     * counters could truncate on very large tensors. */
    for(sptNnzIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];
        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Base pointers of this block inside the output and both factors. */
            blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                mode_i = hitsr->einds[mode].data[z];
                tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                entry = vals[z];

                /* out[mode_i,:] += entry * mat1[i1,:] .* mat2[i2,:] */
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                sptValue * const restrict blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride;
                sptValue * const restrict blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride;
                #pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    bmvals_row[r] += entry *
                        blocked_times_mat_1_row[r]
                        * blocked_times_mat_2_row[r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}
|
arm_device.h | #ifndef ANAKIN2_SABER_ARM_DEVICES_H
#define ANAKIN2_SABER_ARM_DEVICES_H
#include <stdio.h>
#include <vector>
#include "saber/core/device.h"
#ifdef PLATFORM_ANDROID
#include <sys/syscall.h>
#include <unistd.h>
#define __NCPUBITS__ (8 * sizeof (unsigned long))
#define __CPU_SET(cpu, cpusetp) \
((cpusetp)->mask_bits[(cpu) / __NCPUBITS__] |= (1UL << ((cpu) % __NCPUBITS__)))
#define __CPU_ZERO(cpusetp) \
memset((cpusetp), 0, sizeof(cpu_set_t))
#endif
#if __APPLE__
#include "TargetConditionals.h"
#if TARGET_OS_IPHONE
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/machine.h>
#define __IOS__
#endif
#endif
#ifdef USE_ARM_PLACE
// Return the number of CPU cores on this device (always >= 1).
// Android: counted from "processor" lines in /proc/cpuinfo.
// iOS: queried via sysctl "hw.ncpu".  Other platforms: always 1.
static int arm_get_cpucount()
{
#ifdef PLATFORM_ANDROID
    // get cpu count from /proc/cpuinfo
    FILE* fp = fopen("/proc/cpuinfo", "rb");
    if (!fp) {
        return 1;
    }
    int count = 0;
    char line[1024];
    // BUG FIX: drive the loop with fgets() itself.  The original
    // `while (!feof(fp))` pattern only detects EOF after a failed read and
    // can spin forever on a stream error.
    while (fgets(line, sizeof(line), fp)) {
        if (memcmp(line, "processor", 9) == 0) {
            count++;
        }
    }
    fclose(fp);
    if (count < 1) {
        count = 1;
    }
    return count;
#elif __IOS__
    int count = 0;
    size_t len = sizeof(count);
    sysctlbyname("hw.ncpu", &count, &len, NULL, 0);
    if (count < 1) {
        count = 1;
    }
    return count;
#else
    return 1;
#endif
}
// Return total system memory in kB.
// Android: parsed from the "MemTotal:" line of /proc/meminfo.
// iOS and all other platforms: not implemented, returns 0.
static int arm_get_meminfo()
{
#ifdef PLATFORM_ANDROID
    // get total memory from /proc/meminfo (the original comment wrongly said cpuinfo)
    FILE* fp = fopen("/proc/meminfo", "rb");
    if (!fp) {
        return 1;
    }
    int memsize = 0;
    char line[1024];
    // BUG FIX: loop on fgets() directly instead of `while (!feof(fp))`,
    // which only detects EOF after a failed read.
    while (fgets(line, sizeof(line), fp)) {
        sscanf(line, "MemTotal: %d kB", &memsize);
    }
    fclose(fp);
    return memsize;
#elif __IOS__
    // to be implemented
    return 0;
#else
    // BUG FIX: the original fell off the end of this non-void function when
    // neither PLATFORM_ANDROID nor __IOS__ was defined (undefined behavior).
    return 0;
#endif
}
#ifdef PLATFORM_ANDROID
// Best-effort query of the maximum clock frequency (in kHz) of cpu `cpuid`.
// Tries, in order:
//   1. the global cpufreq time_in_state stats,
//   2. the per-cpu time_in_state stats,
//   3. the per-cpu cpuinfo_max_freq file.
// Returns -1 if no source could be read or parsed.
static int get_max_freq_khz(int cpuid)
{
    // first try, for all possible cpu
    char path[256];
    snprintf(path, sizeof(path),
             "/sys/devices/system/cpu/cpufreq/stats/cpu%d/time_in_state", cpuid);
    FILE* fp = fopen(path, "rb");
    if (!fp)
    {
        // second try, for online cpu
        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/cpufreq/stats/time_in_state", cpuid);
        fp = fopen(path, "rb");
        if (!fp)
        {
            // third try, for online cpu
            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpuid);
            fp = fopen(path, "rb");
            if (!fp) {
                return -1;
            }
            int max_freq_khz = -1;
            // BUG FIX: check the fscanf result instead of ignoring it;
            // on a parse failure the -1 sentinel is kept.
            if (fscanf(fp, "%d", &max_freq_khz) != 1) {
                max_freq_khz = -1;
            }
            fclose(fp);
            return max_freq_khz;
        }
    }
    // time_in_state lists "<freq_khz> <time>" pairs; keep the largest frequency.
    int max_freq_khz = 0;
    int freq_khz = 0;
    while (fscanf(fp, "%d %*d", &freq_khz) == 1)
    {
        if (freq_khz > max_freq_khz) {
            max_freq_khz = freq_khz;
        }
    }
    fclose(fp);
    return max_freq_khz;
}
// Query the max frequency of each of the cpu_count cores and split them into
// two clusters: cluster 0 for cores at or above the midpoint frequency
// ("big" cores), cluster 1 below it ("little" cores).  The three output
// vectors are resized to cpu_count.  Always returns 0.
static int arm_sort_cpuid_by_max_frequency(int cpu_count, std::vector<int>& cpuids, \
    std::vector<int>& cpu_freq, std::vector<int>& cluster_ids) {
    //const int cpu_count = cpuids.size();
    if (cpu_count == 0) {
        return 0;
    }
    //std::vector<int> cpu_max_freq_khz;
    cpuids.resize(cpu_count);
    cpu_freq.resize(cpu_count);
    cluster_ids.resize(cpu_count);
    // Record cpu ids in natural order together with their max frequency (MHz).
    for (int i = 0; i < cpu_count; i++)
    {
        int max_freq_khz = get_max_freq_khz(i);
        //printf("%d max freq = %d khz\n", i, max_freq_khz);
        cpuids[i] = i;
        cpu_freq[i] = max_freq_khz / 1000;
    }
    // sort cpuid as big core first
    // simple bubble sort
    /*
    for (int i = 0; i < cpu_count; i++)
    {
        for (int j = i+1; j < cpu_count; j++)
        {
            if (cpu_freq[i] < cpu_freq[j])
            {
                // swap
                int tmp = cpuids[i];
                cpuids[i] = cpuids[j];
                cpuids[j] = tmp;
                tmp = cpu_freq[i];
                cpu_freq[i] = cpu_freq[j];
                cpu_freq[j] = tmp;
            }
        }
    }*/
    // SMP
    // NOTE(review): the bubble sort above is commented out, so cpu_freq is
    // still in cpuid order here; front()/back() are NOT guaranteed to be the
    // max/min frequencies.  The midpoint is only meaningful on SoCs where
    // cpu0 and the last cpu belong to different clusters -- confirm intent.
    int mid_max_freq_khz = (cpu_freq.front() + cpu_freq.back()) / 2;
    //if (mid_max_freq_khz == cpu_freq.back())
    //    return 0;
    for (int i = 0; i < cpu_count; i++)
    {
        if (cpu_freq[i] >= mid_max_freq_khz) {
            cluster_ids[i] = 0;
        }
        else{
            cluster_ids[i] = 1;
        }
    }
    return 0;
}
#endif // PLATFORM_ANDROID
#ifdef __IOS__
// iOS fallback: per-core frequencies cannot be queried, so report a uniform
// placeholder frequency (1000 MHz) for every core and place all cores in
// cluster 0.  Always returns 0.
static int sort_cpuid_by_max_frequency(int cpu_count, std::vector<int>& cpuids, \
    std::vector<int>& cpu_freq, std::vector<int>& cluster_ids){
    if (cpu_count == 0) {
        return 0;
    }
    cpuids.resize(cpu_count);
    cpu_freq.resize(cpu_count);
    cluster_ids.resize(cpu_count);
    for (int i = 0; i < cpu_count; ++i) {
        cpuids[i] = i;
        cpu_freq[i] = 1000;
        cluster_ids[i] = 0;
    }
    // BUG FIX: the original had no return statement at all in this non-void
    // function, which is undefined behavior when the result is used.
    return 0;
}
#endif
#ifdef PLATFORM_ANDROID
// Bind the calling thread to the given set of CPU ids by invoking the raw
// sched_setaffinity syscall directly (presumably because the Android libc in
// use does not expose a usable wrapper -- NOTE(review): confirm for current
// NDK versions).  Returns 0 on success, -1 on syscall failure.
static int set_sched_affinity(const std::vector<int>& cpuids)
{
    // cpu_set_t definition
    // ref http://stackoverflow.com/questions/16319725/android-set-thread-affinity
    // NOTE: this local typedef shadows any libc cpu_set_t inside this function.
    typedef struct
    {
        unsigned long mask_bits[1024 / __NCPUBITS__];
    }cpu_set_t;

    // set affinity for the calling thread only (gettid(), not getpid())
    pid_t pid = gettid();
    cpu_set_t mask;
    __CPU_ZERO(&mask);
    // set one bit per requested cpu id
    for (int i = 0; i < (int)cpuids.size(); i++)
    {
        __CPU_SET(cpuids[i], &mask);
    }

    int syscallret = syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask);
    if (syscallret)
    {
        LOG(ERROR) << "syscall error " << syscallret;
        return -1;
    }
    return 0;
}
// Bind the calling thread to the CPUs described by `mask` via the libc
// sched_setaffinity wrapper; failures are logged but not propagated.
// NOTE(review): `cpu_set_t` here must resolve to the libc type from
// <sched.h> -- the identically named struct defined inside
// set_sched_affinity above is local to that function.  Confirm the required
// header is included on all build configurations.
void SetThreadAffinity(cpu_set_t mask) {
#if defined(__ANDROID__)
    // bionic provides gettid() directly
    pid_t pid = gettid();
#else
    // elsewhere the thread id must be fetched via the raw syscall
    pid_t pid = syscall(SYS_gettid);
#endif
    int err = sched_setaffinity(pid, sizeof(mask), &mask);
    if (err != 0) {
        LOG(ERROR) << "set affinity error: " << strerror(errno);
    }
}
// Pin the current worker threads to the given CPU ids.
// With OpenMP: set the thread count to cpuids.size() and have every OpenMP
// worker bind itself to the full id set.  Without OpenMP: bind the single
// calling thread to cpuids[0] only.
// Returns 0 on success, -1 if any binding failed.
static int set_cpu_affinity(const std::vector<int>& cpuids){
#ifdef USE_OPENMP
    int num_threads = cpuids.size();
    //omp_set_dynamic(0);
    omp_set_num_threads(num_threads);
#if 0
    // alternative (disabled): build a libc cpu_set_t once and bind each
    // worker through SetThreadAffinity
    // compute mask
    cpu_set_t mask;
    CPU_ZERO(&mask);
    for (auto cpu_id : cpuids) {
        CPU_SET(cpu_id, &mask);
    }
#pragma omp parallel for
    for (int i = 0; i < num_threads; ++i) {
        SetThreadAffinity(mask);
        LOG(INFO) << "Set affinity for OpenMP thread " << omp_get_thread_num()
            << "/" << omp_get_num_threads();
    }
#else
    // run the raw-syscall binding on every OpenMP worker; collect per-thread
    // results and report the first failure afterwards
    std::vector<int> ssarets(num_threads, 0);
#pragma omp parallel for
    for (int i = 0; i < num_threads; i++)
    {
        ssarets[i] = set_sched_affinity(cpuids);
    }
    for (int i = 0; i < num_threads; i++)
    {
        if (ssarets[i] != 0)
        {
            LOG(ERROR)<<"set cpu affinity failed, cpuID: " << cpuids[i];
            return -1;
        }
    }
#endif
#else
    // no OpenMP: bind only the calling thread, to the first requested cpu
    std::vector<int> cpuid1;
    cpuid1.push_back(cpuids[0]);
    int ssaret = set_sched_affinity(cpuid1);
    if (ssaret != 0)
    {
        LOG(ERROR)<<"set cpu affinity failed, cpuID: " << cpuids[0];
        return -1;
    }
#endif
    return 0;
}
#endif //PLATFORM_ANDROID
#endif //USE_ARM_PLACE
#endif //ANAKIN2_SABER_ARM_DEVICES_H
|
GB_binop__lt_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lt_uint64
// A.*B function (eWiseMult): GB_AemultB__lt_uint64
// A*D function (colscale): GB_AxD__lt_uint64
// D*A function (rowscale): GB_DxB__lt_uint64
// C+=B function (dense accum): GB_Cdense_accumB__lt_uint64
// C+=b function (dense accum): GB_Cdense_accumb__lt_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_uint64
// C=scalar+B GB_bind1st__lt_uint64
// C=scalar+B' GB_bind1st_tran__lt_uint64
// C=A+scalar GB_bind2nd__lt_uint64
// C=A'+scalar GB_bind2nd_tran__lt_uint64
// C type: bool
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_UINT64 || GxB_NO_LT_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; the loop body comes from the
// included template, specialized here for z = (x < y) on uint64_t inputs.
GrB_Info GB_Cdense_ewise3_noaccum__lt_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    // this operator/type combination was disabled via GxB_NO_* at compile time
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The template body is compiled out (#if 0) for this operator, so the
// function is a stub that exists only to satisfy the generated interface.
GrB_Info GB_Cdense_accumB__lt_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// The template body is compiled out (#if 0) for this operator, so the
// function is a stub that exists only to satisfy the generated interface.
GrB_Info GB_Cdense_accumb__lt_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; the loop body
// comes from the included colscale template, specialized for z = (x < y).
GrB_Info GB_AxD__lt_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // results are written into C's value array (bool, the op's output type)
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; the loop body
// comes from the included rowscale template, specialized for z = (x < y).
GrB_Info GB_DxB__lt_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // results are written into C's value array (bool, the op's output type)
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, over the set union of the patterns of A
// and B; the merge logic lives in the included GB_add_template.c.
GrB_Info GB_AaddB__lt_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // ek_slice workspaces allocated by the template; released by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, over the set intersection of the
// patterns of A and B; the logic lives in the included GB_emult_template.c.
GrB_Info GB_AemultB__lt_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // ek_slice workspaces allocated by the template; released by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary op with the scalar bound to the first
// argument, element by element: Cx [p] = (x < Bx [p]).
GrB_Info GB_bind1st__lt_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // bitmap of B, or NULL
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap (GBB handles a NULL Bb)
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary op with the scalar bound to the second
// argument, element by element: Cx [p] = (Ax [p] < y).
GrB_Info GB_bind2nd__lt_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // bitmap of A, or NULL
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap (GBB handles a NULL Ab)
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = Ax [p] ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply the binary op with the scalar bound
// to the first argument; the transpose loop comes from GB_unop_transpose.c
// driven by the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__lt_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply the binary op with the scalar bound
// to the second argument; the transpose loop comes from GB_unop_transpose.c
// driven by the GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__lt_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
diamond_cmap.h | // This is the implementation using the connectivity map (c-map)
// Count diamond patterns (pairs of triangles sharing an edge): for every
// edge (v0, v1) with v1 < v0, count unordered pairs of common neighbours of
// v0 and v1.  A per-thread connectivity map (c-map) gives O(1) adjacency
// tests against v0.  Externals (g, cmaps, counter, nticks, nqueries, ...)
// are defined by the file that includes this fragment.
std::cout << "Running the c-map implementation\n";
#pragma omp parallel for schedule(dynamic,1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0++) {
#if 1
    // c-map variant: mark v0's neighbourhood, probe it for each v1, unmark.
    auto tid = omp_get_thread_num();
    auto &cmap = cmaps[tid];
    for (auto u : g.N(v0)) cmap[u] = 1;
    for (auto v1 : g.N(v0)) {
        // only consider v1 < v0; `break` assumes g.N(v0) is sorted
        // ascending -- TODO confirm
        if (v1 >= v0) break;
        //uint64_t n = 0;
        VertexSet y0y1;    // common neighbours of v0 and v1
        for (auto u : g.N(v1)) {
#if 0
            // instrumented variant: sample the cycle cost of a c-map lookup
            auto c1 = read_cycle();
            auto ccode = cmap[u];
            auto c2 = read_cycle();
            if (nqueries[tid] < NUM_SAMPLES) {
                auto tick = c2 - c1;
                //std::cout << tick << "\n";
                // discard outliers (e.g. samples perturbed by interrupts)
                if (tick < 500) {
                    nticks[tid] += tick;
                    nqueries[tid] ++;
                }
            }
            if (ccode == 1) y0y1.add(u);
#else
            if (cmap[u] == 1) y0y1.add(u);
#endif
        }
        // every unordered pair (v2, v3) in y0y1 closes one diamond
        for (auto v2 : y0y1) {
            for (auto v3 : y0y1) {
                if (v3 >= v2) break;
                counter ++;
            }
        }
        //counter += n * (n-1) / 2;
    }
    // reset the c-map so it is clean for the next v0
    for (auto u : g.N(v0)) cmap[u] = 0;
#else
    // baseline variant: explicit neighbourhood intersection, no c-map
    for (auto v1 : g.N(v0)) {
        if (v1 >= v0) break;
        uint64_t n = intersect(g, v0, v1);
        counter += n * (n-1) / 2;
    }
#endif
}
|
GB_unop__identity_uint64_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_uint8)
// op(A') function: GB (_unop_tran__identity_uint64_uint8)
// C type: uint64_t
// A type: uint8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with a uint8 -> uint64
// widening cast to all anz entries of A, in parallel over nthreads.
GrB_Info GB (_unop_apply__identity_uint64_uint8)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // sparse/hypersparse/full case: every slot of Ax holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // dead branch for this kernel: the macro is 0 because the cast
        // uint8 -> uint64 changes the entry size
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip slots that hold no entry
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply; the actual work is
// done by the shared transpose template, which expands GB_CAST_OP for
// each entry it moves.
GrB_Info GB (_unop_tran__identity_uint64_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace bucket pointers
    const int64_t *restrict A_slice,    // how A is partitioned over threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
activation.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_ACTIVATION_H_
#define MACE_KERNELS_ACTIVATION_H_
#include <algorithm>
#include <cmath>
#include <memory>
#include <string>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/core/types.h"
#include "mace/kernels/kernel.h"
namespace mace {
namespace kernels {
// Supported activation kinds. Values are assigned explicitly, which
// suggests they are tied to an external/serialized encoding — TODO confirm.
enum ActivationType {
  NOOP = 0,     // identity: output == input
  RELU = 1,     // max(x, 0)
  RELUX = 2,    // min(max(x, 0), relux_max_limit)
  PRELU = 3,    // per-channel parametric ReLU; needs alpha data
  TANH = 4,
  SIGMOID = 5
};
// Maps a textual activation name to its enum value; unrecognized names are
// fatal. Fix: the parameter was previously taken by value (`const
// std::string type`), copying the string on every call for no benefit —
// now taken by const reference (source-compatible for all callers).
inline ActivationType StringToActivationType(const std::string &type) {
  if (type == "RELU") {
    return ActivationType::RELU;
  } else if (type == "RELUX") {
    return ActivationType::RELUX;
  } else if (type == "PRELU") {
    return ActivationType::PRELU;
  } else if (type == "TANH") {
    return ActivationType::TANH;
  } else if (type == "SIGMOID") {
    return ActivationType::SIGMOID;
  } else if (type == "NOOP") {
    return ActivationType::NOOP;
  } else {
    LOG(FATAL) << "Unknown activation type: " << type;
  }
  // Unreachable after LOG(FATAL); satisfies the compiler's return check.
  return ActivationType::NOOP;
}
template <typename T>
void DoActivation(const T *input_ptr,
T *output_ptr,
const index_t size,
const ActivationType type,
const float relux_max_limit) {
MACE_CHECK(DataTypeToEnum<T>::value != DataType::DT_HALF);
switch (type) {
case NOOP:
break;
case RELU:
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = std::max(input_ptr[i], static_cast<T>(0));
}
break;
case RELUX:
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = std::min(std::max(input_ptr[i], static_cast<T>(0)),
static_cast<T>(relux_max_limit));
}
break;
case TANH:
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = std::tanh(input_ptr[i]);
}
break;
case SIGMOID:
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = 1 / (1 + std::exp(-input_ptr[i]));
}
break;
default:
LOG(FATAL) << "Unknown activation type: " << type;
}
}
template <typename T>
void PReLUActivation(const T *input_ptr,
const index_t outer_size,
const index_t input_chan,
const index_t inner_size,
const T *alpha_ptr,
T *output_ptr) {
#pragma omp parallel for collapse(3)
for (index_t i = 0; i < outer_size; ++i) {
for (index_t chan_idx = 0; chan_idx < input_chan; ++chan_idx) {
for (index_t j = 0; j < inner_size; ++j) {
index_t idx = i * input_chan * inner_size + chan_idx * inner_size + j;
if (input_ptr[idx] < 0) {
output_ptr[idx] = input_ptr[idx] * alpha_ptr[chan_idx];
} else {
output_ptr[idx] = input_ptr[idx];
}
}
}
}
}
template <DeviceType D, typename T>
class ActivationFunctor;

// CPU/float implementation. PRELU needs the per-channel alpha tensor and is
// dispatched to PReLUActivation; all other types go through DoActivation.
// NOTE(review): outer/inner sizes assume an NCHW layout (dim(1) = channels)
// — confirm against the op registration.
template <>
class ActivationFunctor<DeviceType::CPU, float> : OpKernel {
 public:
  ActivationFunctor(OpKernelContext *context,
                    ActivationType type,
                    float relux_max_limit)
      : OpKernel(context),
        activation_(type),
        relux_max_limit_(relux_max_limit) {}

  // Applies the configured activation in place semantics permitting;
  // `alpha` is only read for PRELU, `future` is unused on CPU.
  MaceStatus operator()(const Tensor *input,
                        const Tensor *alpha,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    const float *input_ptr = input->data<float>();
    float *output_ptr = output->mutable_data<float>();
    if (activation_ == PRELU) {
      MACE_CHECK_NOTNULL(alpha);
      const float *alpha_ptr = alpha->data<float>();
      const index_t outer_size = output->dim(0);
      const index_t inner_size = output->dim(2) * output->dim(3);
      PReLUActivation(input_ptr, outer_size, input->dim(1), inner_size,
                      alpha_ptr, output_ptr);
    } else {
      DoActivation(input_ptr, output_ptr, output->size(), activation_,
                   relux_max_limit_);
    }
    return MACE_SUCCESS;
  }

 private:
  ActivationType activation_;   // which op to apply
  float relux_max_limit_;       // clamp ceiling, used by RELUX only
};
#ifdef MACE_ENABLE_OPENCL
// Abstract interface for the OpenCL activation kernels; the GPU functor
// below owns a concrete implementation defined elsewhere.
class OpenCLActivationKernel {
 public:
  virtual MaceStatus Compute(
      OpKernelContext *context,
      const Tensor *input,
      const Tensor *alpha,   // per-channel alpha, only meaningful for PRELU
      Tensor *output,
      StatsFuture *future) = 0;
  MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLActivationKernel);
};
// GPU specialization: declaration only — the definitions live in the OpenCL
// implementation files, presumably dispatching through kernel_ (verify).
template <typename T>
class ActivationFunctor<DeviceType::GPU, T> : OpKernel {
 public:
  ActivationFunctor(OpKernelContext *context,
                    ActivationType type,
                    T relux_max_limit);
  MaceStatus operator()(const Tensor *input,
                        const Tensor *alpha,
                        Tensor *output,
                        StatsFuture *future);

 private:
  std::unique_ptr<OpenCLActivationKernel> kernel_;  // concrete GPU kernel
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_ACTIVATION_H_
|
layer.h | /*
Copyright (c) 2014, Kai Klindworth
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef NEURAL_NETWORK_LAYER_H
#define NEURAL_NETWORK_LAYER_H
#include <random>
#include <vector>
#include <neural_network/blas_wrapper.h>
#include <stdexcept>
#include <omp.h>
#include <iterator>
#include <algorithm>
#include <cassert>
namespace neural_network
{
// Fills [begin, end) with i.i.d. draws from U(-var, var).
//
// Fix: the distribution was hard-coded to std::uniform_real_distribution<>
// (i.e. double) regardless of the iterator's value type; it now draws
// directly in value_type, avoiding a silent double->T conversion per draw.
// NOTE(review): `gen` is static and default-seeded, so initialization is
// deterministic across runs and unsynchronized across threads — appears to
// be intended for single-threaded init; verify against callers.
template<typename Iterator>
void uniform_init(Iterator begin, Iterator end, typename Iterator::value_type var)
{
	static std::mt19937 gen;
	std::uniform_real_distribution<typename Iterator::value_type> dist(-var, var);
	for(Iterator it = begin; it != end; ++it)
		*it = dist(gen);
}
// Per-thread scratch buffers for one layer: the forward output, the
// gradient passed down to the previous layer, and the local weight/bias
// gradient accumulators (zero-initialized).
template<typename T>
struct layer_thread_data
{
	layer_thread_data(int in_dim, int out_dim, int weights, int bias)
		: output_data(out_dim),
		  gradient_data(in_dim),
		  dW(weights, 0),
		  dB(bias, 0)
	{}

	std::vector<T> output_data;    // forward result, length out_dim
	std::vector<T> gradient_data;  // backward result, length in_dim
	std::vector<T> dW, dB;         // accumulated parameter gradients
};
/**
 * Base class for all layers: owns the shared parameters (weights/bias),
 * their ADADELTA optimizer state, and one layer_thread_data scratch buffer
 * per OpenMP thread so forward/backward passes can run concurrently.
 */
template<typename T>
class layer_base
{
public:
	// Training enables stochastic behavior (e.g. dropout masks);
	// Testing uses the deterministic paths.
	enum phase {Testing, Training};

	// weights / bias_dim are the parameter counts; one scratch entry is
	// pre-built per thread reported by omp_get_max_threads().
	layer_base(bool propagate_down, int in_dim, int out_dim, int weights, int bias_dim) : cthread_data(omp_get_max_threads(), layer_thread_data<T>(in_dim, out_dim, weights, bias_dim)) {
		this->in_dim = in_dim;
		this->out_dim = out_dim;
		this->propagate_down = propagate_down;
		this->weights.resize(weights);
		this->weights_transposed.resize(weights);
		this->bias.resize(bias_dim);
		this->dW.resize(weights,0);
		this->dB.resize(bias_dim,0);
		this->Eg_w.resize(weights,0);
		this->Ex_w.resize(weights,0);
		this->Eg_b.resize(bias_dim,0);
		this->Ex_b.resize(bias_dim,0);
		//init_weights();
		this->current_phase = phase::Testing;
		regularize = false;
	}

	void set_phase(phase cphase)
	{
		this->current_phase = cphase;
	}

	// Scratch buffers belonging to the calling OpenMP thread.
	layer_thread_data<T>& thread_data()
	{
		return cthread_data[thread_num()];
	}

	const layer_thread_data<T>& thread_data() const
	{
		return cthread_data[thread_num()];
	}

	inline int thread_num() const
	{
		return omp_get_thread_num();
	}

	inline int thread_max() const
	{
		return omp_get_max_threads();
	}

	// Output of the latest forward pass on the calling thread.
	const T* output() const
	{
		return thread_data().output_data.data();
	}

	// Gradient produced by the latest backward pass on the calling thread.
	const T* gradient() const
	{
		return thread_data().gradient_data.data();
	}

	std::vector<T>& get_weights()
	{
		return weights;
	}

	std::vector<T>& get_bias()
	{
		return bias;
	}

	std::vector<T>& get_weights_diff()
	{
		return thread_data().dW;
	}

	std::vector<T>& get_bias_diff()
	{
		return thread_data().dB;
	}

	int output_dimension() const
	{
		return out_dim;
	}

	int input_dimension() const
	{
		return in_dim;
	}

	// Sums the per-thread gradient accumulators into dW/dB, zeroes them,
	// applies one optimizer step scaled by 1/count, and refreshes the
	// transposed weight cache. Call from a single thread between passes.
	void update_weights(int count = 1)
	{
		for(layer_thread_data<T>& cdata : cthread_data)
		{
			for(std::size_t i = 0; i < cdata.dW.size(); ++i)
				dW[i] += cdata.dW[i];
			for(std::size_t i = 0; i < cdata.dB.size(); ++i)
				dB[i] += cdata.dB[i];
			std::fill(cdata.dW.begin(), cdata.dW.end(), 0.0f);
			std::fill(cdata.dB.begin(), cdata.dB.end(), 0.0f);
		}
		update_weights_internal(count);
		//regularize_weights();
		transpose_weights();
	}

	// Uniform init scaled by the neuron fan-in (in_connectivity());
	// bias drawn from [-0.5, 0.5].
	void init_weights()
	{
		if(!weights.empty())
		{
			const T weight_base = 0.5 / std::sqrt(in_connectivity());
			uniform_init(weights.begin(), weights.end(), weight_base);
			transpose_weights();
		}
		uniform_init(bias.begin(), bias.end(), 0.5);
		/*const T weight_base = 0.5 / std::sqrt(in_dim);
		uniform_init(weights.begin(), weights.end(), weight_base);
		transpose_weights();
		uniform_init(bias.begin(), bias.end(), weight_base);*/
	}

	// Text format: "<name> <#weights> <#bias> w0 w1 ... b0 b1 ...".
	void save_weights(std::ostream& stream) const
	{
		//std::cout << "weights: " << this->weights.size() << ", bias: " << this->bias.size() << std::endl;
		stream << this->name() << " " << this->weights.size() << " " << this->bias.size() << " ";
		std::copy(this->weights.begin(), this->weights.end(), std::ostream_iterator<T>(stream, " "));
		std::copy(this->bias.begin(), this->bias.end(), std::ostream_iterator<T>(stream, " "));
		/*for(T cweight : this->weights)
			stream << cweight << " ";
		for(T cbias : this->bias)
			stream << cbias << " ";*/
	}

	// Counterpart of save_weights; throws if the stored layer name or
	// parameter counts do not match this layer.
	void load_weights(std::istream& stream)
	{
		std::size_t weight_count, bias_count;
		std::string layername;
		stream >> layername;
		stream >> weight_count;
		stream >> bias_count;
		if( (weight_count != this->weights.size()) || (bias_count != this->bias.size()) || (layername != this->name()))
			throw std::runtime_error("weightsfile doesn't fit");
		for(T& cweight : this->weights)
			stream >> cweight;
		for(T& cbias : this->bias)
			stream >> cbias;
	}

	// Subclasses fill thread_data().output_data / .gradient_data here.
	virtual void forward_propagation(const T* bottom_data) = 0;
	virtual void backward_propagation(const T* bottom_data, const T* top_gradient) = 0;

	virtual bool trainable() const
	{
		return true;
	}

	virtual std::string name() const
	{
		return "layer_base";
	}

	virtual void regularize_weights() {}

	// Fan-in of a single neuron; used to scale the initial weight range.
	virtual int in_connectivity() { return in_dim; }

protected:
	// One ADADELTA step (matches Zeiler's update rule: running averages of
	// g^2 and dx^2, dx = -RMS(Ex)/RMS(Eg) * g). factor rescales the raw
	// gradient sums, e.g. 1/batch_count.
	void update_weights_internal(int count = 1)
	{
		T rho = 0.95;
		T epsilon =1e-6;
		auto rms = [=](T val) {
			return std::sqrt(val+epsilon);
		};
		auto adadelta = [=](std::vector<T>& Eg, std::vector<T>& Ex, std::vector<T>& W, std::vector<T>& dW, T factor)
		{
			assert((Eg.size() == Ex.size()) && (Ex.size() == W.size()));
			int iterations = Eg.size();
			#pragma omp parallel for
			for(int i = 0; i < iterations; ++i)
			{
				T cdW = dW[i] * factor;
				dW[i] = 0;
				Eg[i] = rho * Eg[i] + (1-rho) * cdW * cdW;
				T dx = -rms(Ex[i]) / rms(Eg[i]) * cdW;
				Ex[i] = rho * Ex[i]+(1-rho)*dx*dx;
				W[i] += dx;
			}
		};
		adadelta(Eg_w, Ex_w, weights, dW, 1.0/count);
		adadelta(Eg_b, Ex_b, bias, dB, 1.0/count);
	}

	// Rebuilds weights_transposed from weights (stored row-major as
	// out_dim x in_dim). Skipped when the size is not in_dim*out_dim,
	// i.e. for layers with shared/per-vector weights.
	void transpose_weights()
	{
		if((int)this->weights.size() != (this->in_dim * this->out_dim) )
			return;
		T* dst = this->weights_transposed.data();
		for(int i = 0; i < this->in_dim; ++i)
		{
			const T* weight_col = &(this->weights[i]);
			for(int j = 0; j < out_dim; ++j)
			{
				*dst++ = *weight_col;
				weight_col += this->in_dim;
			}
		}
	}

	// Scales the strided range down so its mean |value| is at most `desired`.
	void abs_renorm(T* data, int n, T desired, int stride = 1)
	{
		T sum = 0;
		for(int i = 0; i < n; ++i)
			sum += std::abs(data[i*stride]);
		sum /= n;
		if(sum > desired)
			blas::scale(desired/sum, data, n, stride);
	}

	// NOTE(review): condition looks inverted relative to abs_renorm — this
	// scales UP when the max |value| is below `allowed`. No caller visible
	// in this file; verify intent before relying on it.
	void max_renorm(T* data, int n, T allowed, int stride = 1)
	{
		T current = 0;
		for(int i = 0; i < n; ++i)
			current = std::max(std::abs(data[i*stride]), current);
		if(allowed > current)
			blas::scale(allowed/current, data, n, stride);
	}

	phase current_phase;
	int in_dim;
	int out_dim;
	std::vector<T> bias;
	std::vector<T> weights;              // row-major, out_dim x in_dim (dense layers)
	std::vector<T> weights_transposed;   // cached transpose for backprop gemv
	std::vector<T> Eg_w, Ex_w, Eg_b, Ex_b;  // ADADELTA running averages
	std::vector<T> dW, dB;               // cross-thread summed gradients
	std::vector<layer_thread_data<T>> cthread_data;  // one entry per thread
	bool propagate_down;
	bool regularize;
};
/**
 * Dense layer: out = W * in + b, with W stored row-major as
 * out_dim x in_dim (see layer_base::transpose_weights).
 */
template<typename T>
class fully_connected_layer : public layer_base<T>
{
public:
	fully_connected_layer(bool propagate_down, int in_dim, int out_dim) : layer_base<T>(propagate_down, in_dim, out_dim, in_dim*out_dim, out_dim)
	{}

	// y = W x + b into this thread's output buffer.
	void forward_propagation(const T* bottom_data) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		blas::gemv(cdata.output_data.data(), this->weights.data(), false, this->out_dim, this->in_dim, bottom_data);
		for(int i = 0; i < this->out_dim; ++i)
		{
			cdata.output_data[i] += this->bias[i];
		}
	}

	// Accumulates dW (outer product of top gradient and input) and dB, and
	// propagates down through the cached transposed weights instead of a
	// transposed gemv (see the commented-out variant).
	void backward_propagation(const T* bottom_data, const T* top_gradient) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		//propagate down
		if(this->propagate_down)
		{
			//blas_gemv(cdata.gradient_data.data(), this->weights.data(), true, this->out_dim, this->in_dim, top_gradient);
			blas::gemv(cdata.gradient_data.data(), this->weights_transposed.data(), false, this->in_dim, this->out_dim, top_gradient);
		}
		blas::ger(cdata.dW.data(), top_gradient, this->out_dim, bottom_data, this->in_dim);
		for(int i = 0; i < this->out_dim; ++i)
			cdata.dB[i] += top_gradient[i];
	}

	std::string name() const override
	{
		return "fully_connected_layer";
	}

	// Renormalizes each neuron's weight row to a mean |w| of 1/in_dim.
	void regularize_weights() override
	{
		for(int i = 0; i < this->out_dim; ++i)
			this->abs_renorm(this->weights.data() + i*this->in_dim, this->in_dim, 1.0/this->in_dim);
	}
};
/**
 * Elementwise ReLU. No parameters; output dimension equals input dimension.
 */
template<typename T>
class relu_layer : public layer_base<T>
{
public:
	relu_layer(bool propagate_down, int dim) : layer_base<T>(propagate_down, dim, dim, 0, 0) {}

	// Forward: y_i = max(x_i, 0).
	void forward_propagation(const T *bottom_data) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		std::transform(bottom_data, bottom_data + this->in_dim,
		               cdata.output_data.begin(),
		               [](T v) { return v < T(0) ? T(0) : v; });
	}

	// Backward: pass the gradient through only where the input was positive.
	void backward_propagation(const T* bottom_data, const T* top_gradient) override
	{
		if(!this->propagate_down)
			return;
		layer_thread_data<T>& cdata = this->thread_data();
		for(int idx = 0; idx < this->in_dim; ++idx)
			cdata.gradient_data[idx] = (bottom_data[idx] > 0) ? top_gradient[idx] : T(0);
	}

	// Nothing to learn.
	bool trainable() const override
	{
		return false;
	}

	std::string name() const override
	{
		return "relu_layer";
	}
};
/**
 * Dropout with a fixed rate of 0.25. Training: multiplies activations by a
 * fresh Bernoulli(1 - rate) mask per forward pass. Testing: scales all
 * activations by (1 - rate) instead (classic, non-inverted dropout).
 * Per-thread masks and RNGs keep concurrent passes independent.
 */
template<typename T>
class dropout_layer : public layer_base<T>
{
public:
	typedef layer_base<T> Base;
	dropout_layer(bool propagate_down, int dim) : layer_base<T>(propagate_down, dim, dim, 0, 0), dropout_rate(0.25), mask(this->thread_max(), std::vector<unsigned char>(dim, 1)), rng(this->thread_max())
	{}

	void forward_propagation(const T* bottom_data) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		std::vector<unsigned char>& cmask = mask[this->thread_num()];
		std::mt19937& crng = rng[this->thread_num()];
		if(this->current_phase == Base::phase::Training)
		{
			//init mask
			std::bernoulli_distribution dist(1.0-dropout_rate);
			for(auto& centry : cmask)
				centry = dist(crng);
			/*std::copy(bottom_data, bottom_data + this->in_dim, std::ostream_iterator<T>(std::cout, ", "));
			std::cout << std::endl;
			std::copy(mask.begin(), mask.end(), std::ostream_iterator<int>(std::cout, ", "));
			std::cout << std::endl;*/
			//fprop
			for(int i = 0; i < this->in_dim; ++i)
				cdata.output_data[i] = bottom_data[i] * cmask[i];
			//std::copy(cdata.output_data.begin(), cdata.output_data.end(), std::ostream_iterator<T>(std::cout, ", "));
			//std::cout << std::endl;
		}
		else
		{
			// Test time: expected-value scaling instead of masking.
			double factor = 1.0-dropout_rate;
			for(int i = 0; i < this->in_dim; ++i)
				cdata.output_data[i] = bottom_data[i] * factor;
		}
	}

	// Backward reuses the mask drawn by the latest forward pass on this
	// thread, so forward must precede backward on the same thread.
	void backward_propagation(const T*, const T* top_gradient) override
	{
		if(this->propagate_down)
		{
			layer_thread_data<T>& cdata = this->thread_data();
			const std::vector<unsigned char>& cmask = mask[this->thread_num()];
			for(int i = 0; i < this->in_dim; ++i)
				cdata.gradient_data[i] = top_gradient[i] * cmask[i];
		}
	}

	bool trainable() const override
	{
		return false;
	}

	std::string name() const override
	{
		return "dropout_layer";
	}

protected:
	T dropout_rate;                                // probability of zeroing a unit
	std::vector<std::vector<unsigned char>> mask;  // one 0/1 mask per thread
	std::vector<std::mt19937> rng;                 // one generator per thread
};
/**
 * Softmax output layer. Forward subtracts the max input before
 * exponentiating (numerical stability) and additionally clamps the
 * exponent at 340 to keep exp() finite. Backward uses the simplified
 * gradient (y - t), which is the softmax + cross-entropy form — assumes
 * `gt` is the target distribution.
 */
template<typename T>
class softmax_output_layer : public layer_base<T>
{
public:
	softmax_output_layer(bool propagate_down, int in_dim) : layer_base<T>(propagate_down, in_dim, in_dim, 0, 0), temp(this->thread_max(), std::vector<T>(in_dim))
	{}

	void forward_propagation(const T* bottom_data) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		std::vector<T>& ctemp = temp[this->thread_num()];
		T current_max = *(std::max_element(bottom_data, bottom_data + this->out_dim));
		for(int i = 0; i < this->out_dim; ++i)
			ctemp[i] = std::exp(std::min(bottom_data[i] - current_max, static_cast<T>(340)));
		//add final activation (softmax)
		T sum = 0.0;
		for(int i = 0; i < this->out_dim; ++i)
			sum += ctemp[i];
		for(int i = 0; i < this->out_dim; ++i)
			cdata.output_data[i] = ctemp[i]/sum;
	}

	void backward_propagation(const T*, const T* gt) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		//T error_sum = 0;
		//std::cout << "backprop softmax" << std::endl;
		for(int i = 0; i < this->out_dim; ++i)
		{
			//std::cout << cdata.output_data[i] << " vs " << gt[i] << std::endl;
			cdata.gradient_data[i] = cdata.output_data[i] - gt[i];
			//error_sum += std::abs(cdata.gradient_data[i]);
		}
		//std::cout << "error_sum: " << error_sum << std::endl;
		//std::copy(this->gradient_data.begin(), this->gradient_data.end(), std::ostream_iterator<T>(std::cout, ", "));
		//std::cout << std::endl;
	}

	bool trainable() const override
	{
		return false;
	}

	std::string name() const override
	{
		return "softmax_output_layer";
	}

private:
	std::vector<std::vector<T>> temp;  // per-thread exp() scratch buffer
};
/**
 * Output layer applying elementwise tanh. Backward emits
 * (y - t) * (1 - y^2) — the tanh derivative applied to a squared-error
 * style residual, where y is the stored forward output.
 */
template<typename T>
class tanh_output_layer : public layer_base<T>
{
public:
	tanh_output_layer(bool propagate_down, int in_dim) : layer_base<T>(propagate_down, in_dim, in_dim, 0, 0)
	{}

	// Forward: y_i = tanh(x_i).
	void forward_propagation(const T* bottom_data) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		std::transform(bottom_data, bottom_data + this->out_dim,
		               cdata.output_data.begin(),
		               [](T v) { return std::tanh(v); });
	}

	// Backward: gradient through tanh using the cached forward output.
	void backward_propagation(const T*, const T* gt) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		for(int i = 0; i < this->out_dim; ++i)
		{
			const T y = cdata.output_data[i];
			cdata.gradient_data[i] = (y - gt[i]) * (1 - y * y);
		}
	}

	// No parameters to learn.
	bool trainable() const override
	{
		return false;
	}

	std::string name() const override
	{
		return "tanh_output_layer";
	}
};
/**
 * Applies one shared out_dim x vectorsize weight matrix to every
 * length-`vectorsize` slice of the input; the last `passthrough` inputs
 * are copied through unchanged. The output is laid out channel-major
 * (all slices' results for output channel j are contiguous) — transposed
 * relative to vector_connected_layer, hence the name.
 */
template<typename T>
class transpose_vector_connected_layer : public layer_base<T>
{
public:
	transpose_vector_connected_layer(bool propagate_down, int in_dim, int out_dim, int vectorsize, int passthrough) :
		layer_base<T>(propagate_down, in_dim, (in_dim - passthrough)/vectorsize*out_dim + passthrough, vectorsize*out_dim, out_dim), channels_out(out_dim), vectorsize(vectorsize), passthrough(passthrough)
	{}

	void forward_propagation(const T* bottom_data) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		int channels_in = (this->in_dim - passthrough)/vectorsize;
		// out[j][i] = W(j,:) . slice_i  -> channels_out x channels_in result
		blas::gemm(cdata.output_data.data(), this->weights.data(), false, channels_out, vectorsize, bottom_data, true, channels_in, vectorsize);
		T* coutdata = cdata.output_data.data();// + channels_out*channels_in;
		// One shared bias per output channel, added across all slices.
		for(int j = 0; j < channels_out; ++j)
		{
			T cbias = this->bias[j];
			for(int i = 0; i < channels_in; ++i)
			{
				*coutdata++ += cbias;
			}
		}
		// Copy the passthrough tail of the input behind the regular outputs.
		const T* in_data = &(bottom_data[channels_in*vectorsize]);
		std::copy(in_data, in_data + passthrough, coutdata);
		//std::copy(this->output_data.begin(), this->output_data.end(), std::ostream_iterator<T>(std::cout, ", "));
		//std::cout << std::endl;
	}

	// NOTE(review): the passthrough portion of the gradient is not
	// propagated down — verify this is intended.
	void backward_propagation(const T* bottom_data, const T* top_gradient) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		int channels_in = (this->in_dim - passthrough)/vectorsize;
		if(this->propagate_down)
			blas::gemm(cdata.gradient_data.data(), top_gradient, true, channels_out, channels_in, this->weights.data(), false, channels_out, vectorsize);
		blas::gemm(cdata.dW.data(), top_gradient, false, channels_out, channels_in, bottom_data, false, channels_in, vectorsize, 1.0, 1.0);
		// dB[j] = sum of the gradient over all slices for channel j.
		const T* cgradient = top_gradient;
		for(int i = 0; i < this->channels_out; ++i)
		{
			T sum = 0;
			for(int j = 0; j < channels_in; ++j)
				sum += *cgradient++;
			cdata.dB[i] += sum;
		}
	}

	std::string name() const override
	{
		return "transpose_vector_connected_layer";
	}

	// Each neuron sees only one vector, so the fan-in is vectorsize.
	int in_connectivity() override
	{
		return vectorsize;
	}

	void regularize_weights() override
	{
		for(int i = 0; i < channels_out; ++i)
			this->abs_renorm(this->weights.data() + i*vectorsize, vectorsize, 1.0/vectorsize);
	}

protected:
	int channels_out, vectorsize, passthrough;
};
/**
 * Splits the input values into vectors of size vectorsize. Each vector will have out_dim neurons. The weights of the neurons will be shared between the different vectors.
 */
template<typename T>
class vector_connected_layer : public layer_base<T>
{
public:
	vector_connected_layer(bool propagate_down, int in_dim, int out_dim, int vectorsize, int passthrough) :
		layer_base<T>(propagate_down, in_dim, (in_dim - passthrough)/vectorsize*out_dim + passthrough, vectorsize*out_dim, out_dim), channels_out(out_dim), vectorsize(vectorsize), passthrough(passthrough)
	{}

	// Applies the shared vectorsize x channels_out weight matrix to each
	// input slice; output is slice-major (each slice's channels contiguous).
	// The last `passthrough` inputs are copied through unchanged.
	void forward_propagation(const T* bottom_data) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		int channels_in = (this->in_dim - passthrough)/vectorsize;
		//blas_gemm(cdata.output_data.data(), this->weights.data(), false, channels_out, vectorsize, bottom_data, true, channels_in, vectorsize);
		blas::gemm(cdata.output_data.data(), bottom_data, false, channels_in, vectorsize, this->weights.data(), false, vectorsize, channels_out);
		T* coutdata = cdata.output_data.data();// + channels_out*channels_in;
		// Shared per-channel bias, added to every slice.
		for(int i = 0; i < channels_in; ++i)
		{
			for(int j = 0; j < channels_out; ++j)
				*coutdata++ += this->bias[j];
		}
		// Copy the passthrough tail of the input behind the regular outputs.
		const T* in_data = &(bottom_data[channels_in*vectorsize]);
		std::copy(in_data, in_data + passthrough, coutdata);
		//std::copy(this->output_data.begin(), this->output_data.end(), std::ostream_iterator<T>(std::cout, ", "));
		//std::cout << std::endl;
	}

	// NOTE(review): the passthrough portion of the gradient is not
	// propagated down — verify this is intended.
	void backward_propagation(const T* bottom_data, const T* top_gradient) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		int channels_in = (this->in_dim - passthrough)/vectorsize;
		if(this->propagate_down)
			//blas_gemm(cdata.gradient_data.data(), top_gradient, true, channels_out, channels_in, this->weights.data(), false, channels_out, vectorsize);
			blas::gemm(cdata.gradient_data.data(), top_gradient, false, channels_in, channels_out, this->weights.data(), true, vectorsize, channels_out);
		//blas_gemm(cdata.dW.data(), top_gradient, false, channels_out, channels_in, bottom_data, false, channels_in, vectorsize, 1.0, 1.0);
		blas::gemm(cdata.dW.data(), bottom_data, true, channels_in, vectorsize, top_gradient, false, channels_in, channels_out, 1.0, 1.0);
		// dB[i] accumulates the gradient over every slice (slice-major walk).
		const T* cgradient = top_gradient;
		for(int j = 0; j < channels_in; ++j)
		{
			for(int i = 0; i < this->channels_out; ++i)
				cdata.dB[i] += *cgradient++;
		}
	}

	std::string name() const override
	{
		return "vector_connected_layer";
	}

	// Weights for channel i are strided by channels_out in this layout.
	void regularize_weights() override
	{
		for(int i = 0; i < channels_out; ++i)
			this->abs_renorm(this->weights.data() + i, vectorsize, 1.0/vectorsize, channels_out);
	}

	// Each neuron sees only one vector, so the fan-in is vectorsize.
	int in_connectivity() override
	{
		return vectorsize;
	}

protected:
	int channels_out, vectorsize, passthrough;
};
/**
 * Splits the input values into rows. You can specify the number of output values per row. Each of those neurons only connects to the input values of its row. The total output dimension is: neurons per row * rows.
 * E.g. input: 100, rowsize: 20, neurons per row: 10 means that there are five rows, each of them with ten neurons.
 */
/**
 * Block-diagonal dense layer: each length-`rowsize` row of the input feeds
 * its own independent group of `neurons_per_row` neurons (no weight
 * sharing). The last `passthrough` inputs are copied through unchanged.
 *
 * Fix: forward_propagation copied the passthrough values starting at
 * bottom_data[regular_output], which is an OUTPUT-space offset; the
 * passthrough segment of the INPUT starts at row_count*rowsize. The two
 * offsets only coincide when neurons_per_row == rowsize (compare the
 * correct input-offset copy in transpose_vector_connected_layer).
 */
template<typename T>
class row_connected_layer : public layer_base<T>
{
public:
	row_connected_layer(bool propagate_down, int in_dim, int neurons_per_row, int rowsize, int passthrough) :
		layer_base<T>(propagate_down, in_dim, (in_dim - passthrough)/rowsize*neurons_per_row + passthrough, (in_dim - passthrough)*neurons_per_row, (in_dim - passthrough)/rowsize*neurons_per_row), rowsize(rowsize), passthrough(passthrough), per_row_output(neurons_per_row)
	{}

	void forward_propagation(const T* bottom_data) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		const int row_count = (this->in_dim - passthrough)/rowsize;
		const int regular_output = per_row_output * row_count;
		// One independent gemv per row block.
		for(int i = 0; i < row_count; ++i)
		{
			int offset = i*rowsize;
			blas::gemv(cdata.output_data.data() + i*per_row_output, this->weights.data() + offset*per_row_output, false, per_row_output, rowsize, bottom_data+offset);
		}
		for(int i = 0; i < regular_output; ++i)
		{
			cdata.output_data[i] += this->bias[i];
		}
		// Copy the passthrough tail of the INPUT behind the regular outputs.
		// (Bug fix: previously read from bottom_data[regular_output].)
		const T* in_data = &(bottom_data[row_count * rowsize]);
		std::copy(in_data, in_data + passthrough, cdata.output_data.data() + regular_output);
	}

	// NOTE(review): the passthrough portion of the gradient is not
	// propagated down — consistent with the sibling layers; verify intent.
	void backward_propagation(const T* bottom_data, const T* top_gradient) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		const int row_count = (this->in_dim - passthrough)/rowsize;
		const int regular_output = per_row_output * row_count;
		//propagate down
		if(this->propagate_down)
		{
			for(int i = 0; i < row_count; ++i)
			{
				int offset = i * rowsize;
				blas::gemv(cdata.gradient_data.data()+offset, this->weights.data()+offset*per_row_output, true, per_row_output, rowsize, top_gradient + i*per_row_output);
			}
		}
		// Per-row outer products accumulate into the block-diagonal dW.
		for(int i = 0; i < row_count; ++i)
		{
			int offset = i * rowsize;
			blas::ger(cdata.dW.data() + offset*per_row_output, top_gradient+i*per_row_output, per_row_output, bottom_data + offset, rowsize);
		}
		for(int i = 0; i < regular_output; ++i)
			cdata.dB[i] += top_gradient[i];
	}

	std::string name() const override
	{
		return "row_connected_layer";
	}

	// Each neuron sees one row, so the fan-in is rowsize.
	int in_connectivity() override
	{
		return rowsize;
	}

	void regularize_weights() override
	{
		const int row_count = (this->in_dim - passthrough)/rowsize;
		const int neurons = per_row_output * row_count;
		for(int i = 0; i < neurons; ++i)
			this->abs_renorm(this->weights.data() + i*rowsize, rowsize, 1.0/rowsize);
	}

protected:
	int rowsize, passthrough, per_row_output;
};
/**
 * Rearrangement layer (no parameters, no backward): splits the input into
 * vectors of size old_vectorsize and appends the trailing `vec_extension`
 * input values to every vector. Must not be asked to propagate gradients
 * (asserted in the constructor).
 */
template<typename T>
class vector_extension_layer : public layer_base<T>
{
public:
	vector_extension_layer(bool propagate_down, int in_dim, int old_vectorsize, int vec_extension) :
		layer_base<T>(propagate_down, in_dim, (in_dim - vec_extension)/old_vectorsize*(old_vectorsize+vec_extension), 0, 0), old_vectorsize(old_vectorsize), vector_extension(vec_extension)
	{assert(!propagate_down);}

	void forward_propagation(const T* bottom_data) override
	{
		layer_thread_data<T>& cdata = this->thread_data();
		int vector_count = (this->in_dim - vector_extension)/old_vectorsize;
		// The shared extension values live at the tail of the input.
		const T* extension_data = bottom_data + vector_count*old_vectorsize;
		const T* old_data = bottom_data;
		T* new_data = cdata.output_data.data();
		for(int i = 0; i < vector_count; ++i)
		{
			// Emit the original vector followed by a copy of the extension.
			std::copy(old_data, old_data + old_vectorsize, new_data);
			new_data += old_vectorsize;
			std::copy(extension_data, extension_data + vector_extension, new_data);
			new_data += vector_extension;
			old_data += old_vectorsize;
		}
	}

	// Intentionally empty: propagate_down is forbidden for this layer.
	void backward_propagation(const T*, const T*) override
	{
	}

	bool trainable() const override
	{
		return false;
	}

	std::string name() const override
	{
		return "vector_extension_layer";
	}

protected:
	int old_vectorsize, vector_extension;
};
}
#endif //NEURAL_NETWORK_LAYER_H
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R RN A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
  Define declarations.
*/
/* Segment count used when flattening Bezier curves — presumably the number
   of line segments per curve span; confirm at the TraceBezier use sites. */
#define BezierQuantum 200
/* Slack added when growing a primitive-info array — TODO confirm units at
   the allocation sites. */
#define PrimitiveExtentPad 2053.0
/* Hard upper bound on Bezier coordinates, limiting memory consumption. */
#define MaxBezierCoordinates 67108864
/* Report a malformed point token as a DrawError on `exception`.  NOTE: the
   macro body assigns the call site's local `status` and executes `break`, so
   it may only be used inside a loop/switch where both are in scope. */
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
Typedef declarations.
*/
/* One monotonic run of points in a polygon, pre-sorted for scanline
   rendering (see ConvertPathToPolygon). */
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;           /* bounding box of the edge's points */

  double
    scanline;         /* rendering cursor; set to -1.0 when the edge is
                         emitted by ConvertPathToPolygon */

  PointInfo
    *points;          /* point array, ordered by increasing y */

  size_t
    number_points;    /* count of entries in points[] */

  ssize_t
    direction;        /* 1 when the original run went "down" (increasing y),
                         0 when it was reversed from an "up" run */

  MagickBooleanType
    ghostline;        /* MagickTrue for synthetic closing segments that must
                         not be stroked (logged as "transparent") */

  size_t
    highwater;        /* per-render scratch; initialized to 0 here — usage is
                         in the rasterizer, outside this chunk */
} EdgeInfo;
/* Parameters of an ellipse-like element — presumably center (cx,cy), axis
   lengths (major,minor) and rotation angle; confirm at the primitive-trace
   use sites, which are outside this chunk. */
typedef struct _ElementInfo
{
  double
    cx,               /* center x */
    cy,               /* center y */
    major,            /* major-axis extent */
    minor,            /* minor-axis extent */
    angle;            /* rotation, units defined by the consumer */
} ElementInfo;
/* Shared state threaded through the MVG tracing helpers (TraceArc,
   TracePath, ...): a growable primitive array plus error sink. */
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info; /* address of the caller's primitive array pointer, so
                         helpers can reallocate it */

  size_t
    *extent;          /* address of the array's current capacity */

  ssize_t
    offset;           /* index of the next primitive slot to fill */

  PointInfo
    point;            /* current point — TODO confirm exact role at the
                         TracePath use sites */

  ExceptionInfo
    *exception;       /* receives errors raised while tracing */
} MVGInfo;
/* A path converted to sorted-edge form: the efficient representation used
   by the scanline rasterizer (produced by ConvertPathToPolygon). */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;           /* edge array, sorted by DrawCompareEdges */

  size_t
    number_edges;     /* count of entries in edges[] */
} PolygonInfo;
/* Per-point opcodes of the intermediate vector-path representation
   (see ConvertPrimitiveToPath and LogPathInfo). */
typedef enum
{
  MoveToCode,         /* start of a closed subpath */
  OpenCode,           /* start of a subpath that was not closed */
  GhostlineCode,      /* moveto beginning a synthetic (unstroked) closure */
  LineToCode,         /* line segment to this point */
  EndCode             /* terminator of the path array */
} PathInfoCode;
/* One element of the intermediate vector path: a coordinate plus the
   opcode that tells the rasterizer how to reach it. */
typedef struct _PathInfo
{
  PointInfo
    point;            /* coordinate of this path element */

  PathInfoCode
    code;             /* how this point joins the path */
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *);
static ssize_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *info;

  /*
    Allocate a DrawInfo structure (aborting on allocation failure, per
    AcquireCriticalMemory) and fill it with default values.
  */
  info=(DrawInfo *) AcquireCriticalMemory(sizeof(*info));
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from a default-initialized structure; a NULL draw_info therefore
    yields pure defaults.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Deep-copy string members.
  */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  /*
    Scalar and struct members are copied by assignment.
  */
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /*
    Pattern images are deep-copied (clone errors accumulate in the local
    exception, which is discarded below).
  */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      ssize_t
        x;

      /*
        The dash pattern is terminated by a value whose magnitude is below
        MagickEpsilon; allocate generously (2*x+2), zero-fill, and copy the
        x live entries plus the terminator.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /*
        The shallow gradient copy above shares the stops array; replace it
        with a private copy.
      */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): reuses the dash-pattern message tag — looks like a
           copy/paste; confirm whether a gradient-specific tag exists. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPathToPolygon() returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o path_info: Specifies a pointer to a PathInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  /*
    Release each edge's point list, then the edge array, then the polygon
    structure itself; always returns NULL for caller convenience.
  */
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      EdgeInfo
        *edge;

      ssize_t
        j;

      for (j=0; j < (ssize_t) polygon_info->number_edges; j++)
      {
        edge=polygon_info->edges+j;
        if (edge->points != (PointInfo *) NULL)
          edge->points=(PointInfo *) RelinquishMagickMemory(edge->points);
      }
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* Three-way compare of two doubles: -1, 0, or 1. */
static inline int DrawCompareDoubles(const double p,const double q)
{
  if ((p-q) < 0.0)
    return(-1);
  if ((p-q) > 0.0)
    return(1);
  return(0);
}

static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  const PointInfo
    *p,
    *q;

  int
    order;

  /*
    Edge sorting for right-handed coordinate system: order by first point
    (y, then x), then by the cross product of the two leading segments, and
    finally by the second point (y, then x).
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  order=DrawCompareDoubles(p[0].y,q[0].y);
  if (order != 0)
    return(order);
  order=DrawCompareDoubles(p[0].x,q[0].x);
  if (order != 0)
    return(order);
  order=DrawCompareDoubles((p[1].x-p[0].x)*(q[1].y-q[0].y),
    (p[1].y-p[0].y)*(q[1].x-q[0].x));
  if (order != 0)
    return(order);
  order=DrawCompareDoubles(p[1].y,q[1].y);
  if (order != 0)
    return(order);
  return(DrawCompareDoubles(p[1].x,q[1].x));
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
EdgeInfo
*p;
ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    swap;

  ssize_t
    head,
    tail;

  /*
    Reverse the point list in place by exchanging symmetric pairs from the
    two ends toward the middle; a middle element (odd count) stays put.
  */
  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
  ExceptionInfo *exception)
{
  long
    direction,          /* +1 while points advance "down", -1 "up", 0 unset */
    next_direction;

  PointInfo
    point,              /* last point appended to the current run */
    *points;            /* growable point buffer of the current run */

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;             /* x-extent of the current run (y set on flush) */

  ssize_t
    i,
    n;                  /* points accumulated in the current run */

  MagickBooleanType
    ghostline;

  size_t
    edge,               /* index of the next edge slot to fill */
    number_edges,       /* capacity of polygon_info->edges */
    number_points;      /* capacity of points[] */

  /*
    Convert a path to the more efficient sorted rendering form: split the
    path into monotonic runs (edges), reversing "up" runs so every edge's
    points increase in y, then sort the edges for scanline rendering.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo *) NULL);
    }
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /* Seed edge 0 with an explicit empty template so an empty path yields a
     well-defined (zero-edge) polygon. */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            /* Flush the in-progress run as a finished edge before starting
               the new subpath. */
            if (edge == number_edges)
              {
                /* Grow the edge array (doubling). */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  {
                    (void) ThrowMagickException(exception,GetMagickModule(),
                      ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                    points=(PointInfo *) RelinquishMagickMemory(points);
                    return(DestroyPolygonInfo(polygon_info));
                  }
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);  /* make y increasing */
            polygon_info->edges[edge].points=points;  /* ownership moves */
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
            polygon_info->number_edges=edge;
          }
        if (points == (PointInfo *) NULL)
          {
            /* Begin a fresh point buffer for the new subpath. */
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    /* Decide whether this segment goes "down" (y increasing, with x as the
       tie-breaker on horizontal segments). */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge.
        */
        /* Direction flipped: flush the finished monotonic run and start a
           new one sharing its last point. */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                points=(PointInfo *) RelinquishMagickMemory(points);
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        polygon_info->number_edges=edge+1;
        points=(PointInfo *) NULL;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
        /* New run starts at the shared pivot point. */
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* Grow the point buffer (doubling). */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* End of path: flush (or discard a degenerate, <2-point) final run. */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                {
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                  return(DestroyPolygonInfo(polygon_info));
                }
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          points=(PointInfo *) NULL;
          ghostline=MagickFalse;
          edge++;
          polygon_info->number_edges=edge;
        }
    }
  polygon_info->number_edges=edge;
  /* Trim the edge array to its exact size. */
  polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
    polygon_info->number_edges,sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    EdgeInfo
      *edge_info;

    /* Trim each point buffer to its exact size. */
    edge_info=polygon_info->edges+i;
    edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
      edge_info->number_points,sizeof(*edge_info->points));
    if (edge_info->points == (PointInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonInfo(polygon_info));
      }
  }
  /* Sort edges for scanline traversal (see DrawCompareEdges). */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPrimitiveToPath() returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  const PathInfo
    *p;

  /*
    Trace each element of the vector path (coordinate plus opcode name) to
    the draw-event log.
  */
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *annotation;

    switch (p->code)
    {
      case GhostlineCode: annotation="moveto ghostline"; break;
      case OpenCode: annotation="moveto open"; break;
      case MoveToCode: annotation="moveto"; break;
      case LineToCode: annotation="lineto"; break;
      default: annotation="?"; break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,annotation);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,                  /* first point of the current subpath */
    q;                  /* last point emitted (for duplicate suppression) */

  ssize_t
    i,
    n;                  /* next output slot in path_info */

  ssize_t
    coordinates,        /* points remaining in the current subpath */
    start;              /* output index of the current subpath's first point */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* Primitives with no path representation. */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* Count the primitives; worst case each point yields 3 path entries
     (point + ghostline closure pair), plus the EndCode terminator. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PathInfo *) NULL);
    }
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    /* Re-tag the subpath start as OpenCode and append a ghostline pair that
       closes it back to p without being stroked. */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* Trim to the n+1 entries actually written.  NOTE(review): the resize
     result is returned unchecked — a (shrinking) failure would leak the
     original buffer and return NULL; confirm callers tolerate this. */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every dynamically allocated member of the DrawInfo, invalidate
    its signature, and free the structure; returns NULL so callers can clear
    their pointer in one assignment.
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /* String members. */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  /* Pattern images. */
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  /* Font and text attributes. */
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  /* Arrays. */
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  /* Clipping and compositing masks. */
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Invalidate the signature to catch use-after-destroy. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Clip the horizontal span [edge->x1,edge->x2] on row y so the affine
    transform of (x,y) keeps the first coordinate within [0,columns) and the
    second within [0,rows).  An empty result is signalled by x2 < x1.
  */
  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* z is the transformed column at x == 0; the crossings of z+sx*x with 0
     and columns bound the admissible x range. */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* Negative slope: the two intercepts swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: the column is constant; reject the row outright when it
         falls outside the image. */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /* NOTE(review): this rejection sets x2=edge->x2, unlike the
             columns branch above which uses edge->x1 to force an empty
             span — confirm the asymmetry is intended. */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    det;

  /*
    Invert the affine transform: the 2x2 linear part is inverted via its
    (perceptible-reciprocal) determinant, then the translation is mapped
    through the inverted linear part.
  */
  det=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*affine->ry);
  inverse_affine.sx=det*affine->sy;
  inverse_affine.rx=det*(-affine->rx);
  inverse_affine.ry=det*(-affine->ry);
  inverse_affine.sy=det*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Composite `source` over `image` through the affine transform: map the
    source's corners forward to find the destination bounding box, then for
    each destination row sample the source through the inverse transform.
  */
  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    /* Map each source corner into destination space. */
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* Clamp the destination bounding box to the image. */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=CastDoubleToLong(ceil(edge.y1-0.5));
  stop=CastDoubleToLong(floor(edge.y2+0.5));
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Clip this row's span to the part that maps inside the source. */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
      ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
      inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5));
         x <= CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
    {
      /* Sample the source at the inverse-mapped location and composite
         "over" onto the destination pixel. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  /*
    Debug aid: outline each polygon edge (stroke color encodes the edge's
    direction flag: "#f00" when nonzero, "#0f0" otherwise) plus the overall
    bounding box in blue ("#00f").
  */
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /*
    Default to 96 DPI unless the draw info carries an explicit density.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /*
    Half the (affine-scaled) stroke width in device pixels; used to pad each
    rectangle so the stroke is not clipped.
  */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounding boxes, padded by mid and clamped to the
        image extent.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /*
        Draw one rectangle per edge.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        /* terminate the primitive list before rendering */
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /*
            An edge rectangle failed to draw; stop early.
          */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /*
    Draw the overall bounding rectangle in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *mask_image;

  MagickBooleanType
    status;

  /*
    Look up the clip path registered under the given id, render it to a
    mask image, and install it as the image's write mask.
  */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  mask_image=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
    exception);
  if (mask_image == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask_image,exception);
  mask_image=DestroyImage(mask_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path and return it as an image clipping mask: the path is
    rendered opaque white on a transparent canvas, the alpha channel is
    separated into a grayscale image, and the result is negated.  Returns
    NULL on failure; on success the caller owns the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  /*
    Fill the path with opaque white and suppress any stroke so only the
    path's coverage contributes to the mask.
  */
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
    }
  /*
    Destroy the mask exactly once on failure.  The previous code also
    destroyed it inside the separate-mask block above, so a NegateImage()
    failure left clip_mask NULL and this statement then called
    DestroyImage(NULL) — a double destroy that trips the NULL-image assert.
  */
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path and return it as an image composite mask: the path is
    rendered opaque white on a transparent canvas, the alpha channel is
    separated into a grayscale image, and the result is negated.  Returns
    NULL on failure; on success the caller owns the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  /*
    Fill the path with opaque white and suppress any stroke so only the
    path's coverage contributes to the mask.
  */
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
    }
  /*
    Destroy the mask exactly once on failure.  The previous code also
    destroyed it inside the separate-mask block above, so a NegateImage()
    failure left composite_mask NULL and this statement then called
    DestroyImage(NULL) — a double destroy that trips the NULL-image assert.
  */
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  double
    dx,
    dy;

  ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  /*
    Stroke the path as a sequence of dashes: walk each segment, alternating
    pen-down/pen-up runs according to the (affine-scaled) dash pattern, and
    render each completed pen-down run as a stroked sub-polygon.
    State: n indexes the dash pattern (even = pen down, odd = gap),
    j counts vertices queued in dash_polygon, and length is the remaining
    extent of the current dash/gap.
  */
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count the vertices of the primitive path.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  /*
    Dash pattern entries are in user units; scale by the affine transform.
  */
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance through the pattern until the offset is
    exhausted, leaving n and length positioned mid-pattern.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each path segment, slicing it into dashes and gaps.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    /* defensive: skip segments longer than the coordinate budget */
    if (maximum_length > (double) (MaxBezierCoordinates >> 2))
      continue;
    if (fabs(length) < MagickEpsilon)
      {
        /*
          Current dash/gap is exhausted; advance to the next pattern entry,
          wrapping to the start on the terminating zero entry.
        */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /*
            Odd pattern index: pen-up gap; restart the dash polygon at the
            point where the gap ends.
          */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /*
            Even pattern index: pen-down dash ends here; emit the queued
            vertices as a stroked sub-polygon.
          */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /*
      Carry the unconsumed remainder of this segment into the next one.
    */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    /* mid-dash at segment end: queue the vertex and keep the pen down */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      /*
        Flush the final partial dash (nudged by epsilon so it has extent).
      */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Map pixel (x,y) to a stop offset along the gradient.  For linear
    gradients this is the (unnormalized) projection of the pixel onto the
    gradient vector; for radial gradients it is a normalized elliptical
    distance from the center (plain distance under repeat spread, which the
    caller normalizes by the radius).
  */
  if (gradient->type == RadialGradient)
    {
      PointInfo
        delta;

      if (gradient->spread == RepeatSpread)
        {
          delta.x=(double) x-gradient->center.x;
          delta.y=(double) y-gradient->center.y;
          return(sqrt(delta.x*delta.x+delta.y*delta.y));
        }
      /*
        Rotate into the gradient's frame and scale each axis by its radius.
      */
      delta.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      delta.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(delta.x*delta.x+delta.y*delta.y));
    }
  if ((gradient->type == UndefinedGradient) ||
      (gradient->type == LinearGradient))
    {
      const SegmentInfo
        *vector;

      double
        dot,
        magnitude,
        reciprocal;

      PointInfo
        u,
        v;

      vector=(&gradient->gradient_vector);
      u.x=vector->x2-vector->x1;
      u.y=vector->y2-vector->y1;
      v.x=(double) x-vector->x1;
      v.y=(double) y-vector->y1;
      magnitude=sqrt(v.x*v.x+v.y*v.y);
      reciprocal=PerceptibleReciprocal(sqrt(u.x*u.x+u.y*u.y)*magnitude);
      dot=u.x*v.x+u.y*v.y;
      return(reciprocal*dot*magnitude);
    }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  const StopInfo
    *left,
    *right;

  /*
    qsort() comparator: order gradient stops by ascending offset; offsets
    within MagickEpsilon of each other compare equal (checked only after
    the greater-than test, matching the original ordering).
  */
  left=(const StopInfo *) x;
  right=(const StopInfo *) y;
  if (left->offset > right->offset)
    return(1);
  if (fabs(left->offset-right->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.  Rows are processed in
    parallel; each pixel's offset along the gradient is mapped through the
    stop list and the blended stop color is composited over the pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Stops must be ordered by ascending offset for the interpolation below.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    PixelInfo
      composite,
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Seed the offset at x == 0; linear offsets are normalized by the
      gradient vector's length.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the end-stop colors.
            The offset is recomputed except at the gradient origin pixel.
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          /*
            Find the first stop beyond the offset; i-1 and i bracket it.
          */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /*
                  Linear interpolation between the bracketing stops.
                */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: offsets mirror back and forth across [0,1].
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /*
            Repeat spread: the pattern tiles along the gradient; the seam
            where one repetition meets the next is antialiased by blending
            the last stop into the first.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat*PerceptibleReciprocal(gradient->radius);
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At the seam, blend between the last and first stops.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const double pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives; grow the
    primitive-info array if not.  The required extent is computed in double
    so that overflow can be detected before casting back to an integer.
  */
  quantum=sizeof(**mvg_info->primitive_info);
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad*quantum+1.0;
  if (extent <= (double) *mvg_info->extent)
    return(MagickTrue);
  /*
    Only resize when extent survives a round-trip through CastDoubleToLong,
    i.e. it is exactly representable and within range.
  */
  if (extent == (double) CastDoubleToLong(extent))
    {
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /*
            Mark the newly-acquired tail entries as undefined terminators.
          */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory((size_t) (
    PrimitiveExtentPad*quantum));
  (void) memset(*mvg_info->primitive_info,0,(size_t) (PrimitiveExtentPad*
    quantum));
  *mvg_info->extent=1;
  mvg_info->offset=0;
  return(MagickFalse);
}
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  /*
    Parse a double from string in a locale-independent way; *sentinal is set
    to the first unparsed character (strtod() semantics).  The original
    copied sentinal into a local and assigned it back, which had no effect;
    the direct call is equivalent.
  */
  return(InterpretLocaleValue(string,sentinal));
}
static int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Splay-tree key comparator: MVG macro names compare as C strings.
  */
  return(strcmp((const char *) target,(const char *) source));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes: returns a splay
    tree mapping each named push block (e.g. push graphic-context "wheel")
    to the MVG text between its push and matching pop, or NULL when
    primitive is NULL.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              Scan ahead for the matching pop, tracking push/pop nesting
              depth in n.
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* candidate macro end: just before this pop keyword */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *terminator;

  double
    value;

  /*
    A string qualifies as a point coordinate if it parses as a number:
    either the value is nonzero, or it is zero but at least one character
    was consumed by the parser.
  */
  value=GetDrawValue(point,&terminator);
  if ((fabs(value) < MagickEpsilon) && (terminator == point))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single-coordinate, open-subpath primitive at the given point.
  */
  primitive_info->point=point;
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
const char
*p;
ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=(size_t) PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
/* Avoid recursive expansion when a class references itself. */
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if ((mvg_class != (const char *) NULL) && (p > primitive))
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream: splice the macro body between the
already-consumed prefix and the remainder of the primitive string.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
/*
Map object-bounding-box units onto the object's bounds.
FIX: the source text had the mis-encoded call
"GetAffineMatrix(¤t);" — the "&curren" of "&current" was
mangled into the HTML entity for the currency sign.  Restore
the intended reset of the saved `current' matrix.
*/
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
if (LocaleCompare("currentColor",keyword) == 0)
{
/* Consume the argument; currentColor has no effect here. */
(void) GetNextToken(q,&q,extent,token);
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
/* NOTE: unlike decorate, a bad direction does not break early. */
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
/* While rendering a clip path, fill settings are ignored. */
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
/* A matching artifact means the token names a pattern, not a color. */
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
/* Accept either a percentage ("50%") or a 0..1 fraction. */
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* SVG opacities compose multiplicatively; MVG sets them absolutely. */
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
/* "font none" clears any previously selected font. */
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
/* Not a named weight ("bold", ...): accept a numeric weight. */
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
/* Consume the argument; gradient units are not acted on here. */
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
/* The token after "image" selects the composite operator. */
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
/*
Letter spacing is expressed as a multiple of the width of a
space character, measured with the current font metrics.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Look up a previously pushed mask macro by name and render it as
this context's composite mask.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
/* Consume the argument; offset is not acted on here. */
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
/* Accept either a percentage ("50%") or a 0..1 fraction. */
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* Applies to both fill and stroke alpha (cf. fill-opacity). */
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
/* Rendering resumes only when all defs scopes are closed. */
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
/* Drop the image clip mask if the popped context changed it. */
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context: skip ahead to the matching "pop class"; the
body is expanded later by the "class" keyword.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
/* Skip ahead to the matching "pop clip-path". */
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
/* "push gradient <name> <type> x1,y1 x2,y2 [...]". */
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
/* Capture the gradient body up to the matching "pop gradient". */
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
/* Transform the gradient segment by the current affine matrix. */
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
/* New context inherits everything from the one below it. */
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
/* "push pattern <name> x,y width,height". */
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=CastDoubleToLong(ceil(GetDrawValue(token,
&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=CastDoubleToLong(ceil(GetDrawValue(token,
&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) CastDoubleToLong(floor(GetDrawValue(
token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
/*
FIX: clamp through CastDoubleToLong() as done for bounds.width
above; a bare (size_t) cast of an out-of-range double is
undefined behavior.
*/
bounds.height=(size_t) CastDoubleToLong(floor(GetDrawValue(
token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* Capture the pattern body up to the matching "pop pattern". */
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* Build a rotation matrix; fmod keeps the angle in [0,360). */
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* NOTE(review): skewY uses -tan(angle/2) while skewX uses
   sin(angle) — asymmetry preserved as-is; confirm upstream. */
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
/*
Grow the gradient stop array: two entries are pre-allocated so
the resize path only triggers from the third stop onward.
*/
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
/* While rendering a clip path, stroke settings are ignored. */
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
/* A matching artifact means the token names a pattern, not a color. */
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
/*
First pass (read-only cursor r): count the dash values so we
can size the array; second pass re-reads them from q.
*/
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
/* 2*x+2 entries: room for odd-count duplication + terminator. */
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
/* An odd number of dashes is repeated to make an even cycle. */
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
/* SVG opacities compose multiplicatively; MVG sets them absolutely. */
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
/* Reset the text cursor at the start of each text primitive. */
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
/* SVG synonym for text-align; both set the same field. */
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here: the macro
body is rendered recursively in a cloned context.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;  /* unrecognized keyword */
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=CastDoubleToLong(ceil(
GetDrawValue(token,&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=CastDoubleToLong(ceil(
GetDrawValue(token,&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) CastDoubleToLong(
floor(GetDrawValue(token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) CastDoubleToLong(
floor(GetDrawValue(token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,(double) number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(BezierQuantum*(double) primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,(double) number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(keyword,exception);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,ExpandAffine(
&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,(double)
graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /*
    Render the MVG (Magick Vector Graphics) primitives carried by draw_info
    onto the image, starting at recursion depth 0.
  */
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the pattern image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern path and its geometry were registered earlier as image
    artifacts keyed by the pattern name; bail out quietly if either is
    missing.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  /*
    Create a fully transparent canvas sized by the pattern geometry.
  */
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (*pattern == (Image *) NULL)
    return(MagickFalse);  /* allocation failed; avoid NULL dereference below */
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the MVG path onto the pattern canvas with a private DrawInfo so
    the caller's fill/stroke patterns are not recursively re-applied.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyPolygonThreadSet() releases every per-thread PolygonInfo in the set
  and then the set itself; always returns NULL for pointer-clearing idiom.
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  ssize_t
    i,
    number_threads;

  assert(polygon_info != (PolygonInfo **) NULL);
  /*
    Hoist the resource-limit query out of the loop: it is loop-invariant and
    the original form re-queried the limit on every iteration.
  */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
/*
  AcquirePolygonThreadSet() builds one PolygonInfo per worker thread so that
  DrawPolygonPrimitive() can rasterize scanlines in parallel: slot 0 owns the
  polygon converted from the primitive path; every other slot receives a deep
  copy of slot 0's edge table (each thread mutates its edges' highwater/
  scanline state, so they cannot be shared).  Returns NULL on failure.
*/
static PolygonInfo **AcquirePolygonThreadSet(
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo **) NULL);
    }
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(primitive_info,exception);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  polygon_info[0]=ConvertPathToPolygon(path_info,exception);
  /*
    path_info is no longer needed once converted; releasing it here (instead
    of after the copy loop) fixes a leak on every error return below.
  */
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  if (polygon_info[0] == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonThreadSet(polygon_info));
    }
  for (i=1; i < (ssize_t) number_threads; i++)
  {
    EdgeInfo
      *edge_info;

    ssize_t
      j;

    polygon_info[i]=(PolygonInfo *) AcquireMagickMemory(
      sizeof(*polygon_info[i]));
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonThreadSet(polygon_info));
      }
    polygon_info[i]->number_edges=0;
    edge_info=polygon_info[0]->edges;
    polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory(
      polygon_info[0]->number_edges,sizeof(*edge_info));
    if (polygon_info[i]->edges == (EdgeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonThreadSet(polygon_info));
      }
    (void) memcpy(polygon_info[i]->edges,edge_info,
      polygon_info[0]->number_edges*sizeof(*edge_info));
    /*
      Set number_edges BEFORE clearing the copied point pointers: the
      original cleared a zero-length range, so a mid-loop allocation failure
      left slot 0's points pointers aliased here and DestroyPolygonThreadSet
      would double-free them.
    */
    polygon_info[i]->number_edges=polygon_info[0]->number_edges;
    for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
      polygon_info[i]->edges[j].points=(PointInfo *) NULL;
    for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
    {
      edge_info=polygon_info[0]->edges+j;
      /*
        Allocate PointInfo elements (the original sized this array with
        sizeof(*edge_info), i.e. EdgeInfo, over-allocating each edge).
      */
      polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory(
        edge_info->number_points,sizeof(*edge_info->points));
      if (polygon_info[i]->edges[j].points == (PointInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(DestroyPolygonThreadSet(polygon_info));
        }
      (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
        edge_info->number_points*sizeof(*edge_info->points));
    }
  }
  return(polygon_info);
}
/*
  DestroyEdge() releases the point list of the edge at the given index and
  compacts the edge array over it; returns the new edge count.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge)
{
  EdgeInfo
    *edges;

  assert(edge < (ssize_t) polygon_info->number_edges);
  edges=polygon_info->edges;
  edges[edge].points=(PointInfo *) RelinquishMagickMemory(edges[edge].points);
  polygon_info->number_edges--;
  if (edge < (ssize_t) polygon_info->number_edges)
    (void) memmove(edges+edge,edges+edge+1,(size_t)
      (polygon_info->number_edges-edge)*sizeof(*edges));
  return(polygon_info->number_edges);
}
/*
  GetFillAlpha() returns the fill opacity in [0,1] for pixel (x,y) against
  the polygon's edge table and stores the stroke opacity in *stroke_alpha.
  'mid' is half the (affine-scaled) stroke width; 'fill_rule' selects
  even-odd vs. non-zero winding.  NOTE(review): this mutates the edge table
  (highwater/scanline caching, edge retirement via DestroyEdge), so each
  thread must own its PolygonInfo — see AcquirePolygonThreadSet.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,    /* squared distance from (x,y) to the nearest edge segment */
    subpath_alpha;

  PointInfo
    delta;

  const PointInfo
    *q;

  EdgeInfo
    *p;

  ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edges are ordered by y; once one starts below the scanline, stop */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* edge lies entirely above the scanline: retire it permanently */
        (void) DestroyEdge(polygon_info,j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* resume from the cached per-edge scanline position (highwater) */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* projection falls before the segment start: distance to q */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* projection falls past the segment end: distance to q+1 */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* perpendicular (squared) distance to the segment interior */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  /* anti-aliased stroke edge: fade by distance */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      /* anti-aliased fill boundary: fade by distance */
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* edge entirely left of (x,y): crossing counts by direction */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* side-of-line test decides whether the scan ray crosses this edge */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule: inside when crossing count is odd */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
/*
  DrawPolygonPrimitive() rasterizes a polygon/line primitive onto the image:
  it builds per-thread edge tables, clips the primitive's bounding box to
  the image, then composites fill and stroke alpha pixel by pixel, one
  scanline per OpenMP task.
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    fill,
    status;

  double
    mid;  /* half the affine-scaled stroke width */

  PolygonInfo
    **magick_restrict polygon_info;

  EdgeInfo
    *p;

  ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* degenerate primitive: nothing to rasterize */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info,exception);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  bounds=polygon_info[0]->edges[0].bounds;
  artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
  if (IsStringTrue(artifact) != MagickFalse)
    (void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
  /* union of all edge bounding boxes */
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* pad by the stroke half-width plus one pixel of anti-alias margin */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the render rectangle to the image */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    NOTE(review): coordinates == 1 can never hold here (rejected above), so
    this branch is reached only when number_edges == 0 — confirm intent.
  */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
      stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
        stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel nearest the primitive point is painted */
          if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) &&
              (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5))))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
  stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    /* each thread rasterizes with its own PolygonInfo (mutable edge state) */
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
    stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* hard threshold when anti-aliasing is disabled */
          fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  LogPrimitiveInfo() writes a trace of the given primitive to the debug log.
  Single-coordinate primitives (alpha, color, image, point, text) are logged
  with their paint method name; path-style primitives are logged vertex by
  vertex, flagging consecutive duplicate points and noting whether each
  subpath ends open or closed back on its first point.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,       /* first point of the current subpath */
    point,   /* point under inspection */
    q;       /* previous point (for duplicate detection) */

  ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-style primitive: walk the coordinate list one subpath at a time.
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /*
          Start of a new subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /*
      Subpath complete: report whether it closed back on its first point.
    */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  ssize_t
    i,
    x;  /* primitive origin; also reused as pixel loop counter below */

  ssize_t
    y;  /* primitive origin; also reused as row loop counter below */

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /*
    A non-gray fill or stroke cannot be rendered on a grayscale canvas, so
    promote the image to sRGB first.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Under SVG compliance, install the clip and composite masks for the
    duration of this primitive (removed again at the bottom).
  */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /*
    Round the primitive origin to the nearest integer pixel.
  */
  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      /*
        Paint the alpha channel only, per the requested paint method.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /*
            Replace the alpha of every pixel fuzzily matching the pixel at
            the primitive origin.
          */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          /*
            Flood-fill alpha from the origin; for fill-to-border the target
            color is the border color rather than the seed pixel.
          */
          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* restrict the fill to the alpha channel only */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /*
            Reset the alpha of every pixel in the image to the fill alpha.
          */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Paint full pixel colors, per the requested paint method.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /*
            Recolor every pixel fuzzily matching the pixel at the origin.
          */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /*
            Recolor every pixel in the image with the fill color.
          */
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      /*
        Read the image to composite: inline data: URI or a filename/URL.
      */
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            status&=SetImageInfo(clone_info,0,exception);
            /* http/mpr sources keep the full text as the filename */
            if ((LocaleNCompare(clone_info->magick,"http",4) == 0) ||
                (LocaleCompare(clone_info->magick,"mpri") == 0))
              (void) CopyMagickString(clone_info->filename,primitive_info->text,
                MagickPathExtent);
            if (clone_info->size != (char *) NULL)
              clone_info->size=DestroyString(clone_info->size);
            if (clone_info->extract != (char *) NULL)
              clone_info->extract=DestroyString(clone_info->extract);
            if (*clone_info->filename != '\0')
              composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      /* only the first image of a sequence is composited */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5));
      y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5));
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
      /*
        Position the composite per the draw gravity, then apply the affine.
      */
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine,exception);
      else
        status&=CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      Quantum
        *q;

      /*
        Composite a single fill-colored pixel at the (clipped) origin.
      */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
        GetPixelAlpha(image,q),q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      /*
        Delegate text rendering to AnnotateImage() at the primitive origin.
      */
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      /*
        Path-style primitive: filled and/or stroked polygon.
      */
      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          /* fill first (stroke disabled), then dash the stroke separately */
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
               (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          /* fill first (stroke disabled), then trace the stroke outline */
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      /* remove the temporary SVG clip/composite masks installed above */
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
/*
  DrawRoundLinecap() rounds off the end of an open stroked path by drawing a
  tiny four-point polygon (jittered by a few epsilons so it has area) at the
  given vertex.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    cap[5];

  ssize_t
    j;

  j=0;
  while (j < 4)
    cap[j++]=(*primitive_info);
  cap[0].coordinates=4;
  cap[1].point.x+=2.0*MagickEpsilon;
  cap[2].point.x+=2.0*MagickEpsilon;
  cap[2].point.y+=2.0*MagickEpsilon;
  cap[3].point.y+=2.0*MagickEpsilon;
  cap[4].primitive=UndefinedPrimitive;  /* terminator for the polygon walk */
  return(DrawPolygonPrimitive(image,draw_info,cap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The stroke is rendered as a filled polygon: clone the draw info, move the
    stroke color/pattern into the fill slot, and disable the stroke itself.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /*
    Trace and fill the stroke outline of each subpath in turn.
  */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a single point has no stroke */
    stroke_polygon=TraceStrokePolygon(draw_info,p,exception);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;  /* last vertex of this subpath */
    closed_path=p->closed_subpath;
    /*
      Round off both ends of an open subpath when the linecap calls for it.
    */
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
/*
  GetAffineMatrix() resets *affine_matrix to the identity transform: unit
  scale with all other coefficients cleared to zero.
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* zero everything, then restore the unit scale factors */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /*
    Defaults: opaque black fill, fully transparent white stroke.
  */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit settings carried on the (cloned) image info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Override defaults from any matching image options.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* the weight option is either a named weight or a raw number */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permutation of (n,k), i.e. the binomial
% coefficient n-choose-k.
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
/*
  Permutate() computes n!/(k!*(n-k)!) in floating point: multiply the terms
  k+1..n, then divide by 1..(n-k).
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  ssize_t
    term;

  result=1.0;
  term=k;
  while (++term <= n)
    result*=term;
  term=0;
  while (++term <= (n-k))
    result/=term;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
/*
  TraceArc() renders an arc as an ellipse centered on the midpoint of the
  start and end points, with per-axis radii equal to the distance from that
  midpoint to the start point.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    extent,
    midpoint;

  midpoint.x=0.5*(end.x+start.x);
  midpoint.y=0.5*(end.y+start.y);
  extent.x=fabs(midpoint.x-start.x);
  extent.y=fabs(midpoint.y-start.y);
  return(TraceEllipse(mvg_info,midpoint,extent,degrees));
}
/*
  TraceArcPath() expands an SVG-style elliptical arc (endpoints, radii,
  x-axis rotation, large-arc and sweep flags) into a run of cubic Bezier
  segments appended at the current MVG offset.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Degenerate cases: coincident endpoints reduce to a point, vanishing
    radii to a straight line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /*
    Endpoint-to-center conversion, working in a frame rotated by -angle and
    scaled by the radii (cf. the SVG elliptical-arc implementation notes).
  */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /*
        Radii too small to span the endpoints: scale them up uniformly.
      */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      /* the flag pair selects which of the two candidate centers to use */
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    Approximate the arc with one cubic Bezier per (at most) quarter turn.
  */
  arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5*
    MagickPI+MagickEpsilon)))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    /* gamma is the Bezier control-point distance for this segment's sweep */
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Emit the four Bezier control points, mapped back to user space.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    /* force the final segment to land exactly on the requested endpoint */
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /* TraceBezier may reallocate: refresh p from the MVG info */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /*
    Stamp every generated coordinate with the owning primitive type.
  */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceBezier() evaluates a Bezier curve of arbitrary order from the
  number_coordinates control points already stored at the current MVG offset,
  replacing them with a polyline approximation of the curve.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Size the sample count from the largest coordinate spread, guarding
    against values too large to hold in a size_t.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  /* CheckPrimitiveExtent may reallocate the primitive buffer */
  if (CheckPrimitiveExtent(mvg_info,(double) control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  /* binomial coefficients of the Bernstein basis polynomials */
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /* finish exactly on the last control point */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /*
    Stamp every generated coordinate with the owning primitive type.
  */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
/*
  TraceCircle() renders a circle centered at 'start' whose radius is the
  distance from 'start' to 'end'.  It delegates to TraceEllipse() with equal
  radii and a full 0..360 degree sweep.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    distance;

  PointInfo
    degrees,
    radii;

  /* the radius is the Euclidean distance between the two control points */
  distance=hypot(end.x-start.x,end.y-start.y);
  radii.x=distance;
  radii.y=distance;
  /* a full angular sweep produces a closed circle */
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,degrees));
}
/*
  TraceEllipse() approximates an ellipse (or elliptical arc) centered at
  'center' with radii 'radii' as a short segmented polyline appended at
  mvg_info->offset.  'arc' carries the start (arc.x) and end (arc.y) angles
  in degrees; the end angle is normalized upward so the sweep is
  non-negative.  Returns MagickFalse when the primitive array cannot be
  grown or a point cannot be traced.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* a (near) zero radius yields an empty but successful primitive */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /* choose an angular step: finer for larger radii, capped at pi/8 */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  /* normalize the end angle so angle.y >= angle.x */
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  /* upper bound on the number of vertices this sweep can emit */
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (CheckPrimitiveExtent(mvg_info,coordinates) == MagickFalse)
    return(MagickFalse);
  /* re-fetch: CheckPrimitiveExtent() may have reallocated the array */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* emit the exact terminal point of the sweep */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* mark the subpath closed when first and last vertices coincide */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* walk p back over the generated points, stamping the primitive type
     (p starts one past the last vertex — same pattern as the other
     Trace*() helpers in this file) */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceLine() emits a two-point line primitive from 'start' to 'end'.  A
  degenerate segment (both endpoints coincide within MagickEpsilon) is
  demoted to a single point primitive.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  MagickBooleanType
    degenerate;

  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      /* collapse the zero-length line into a point primitive */
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  primitive_info[1].primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  TracePath() converts an SVG-style path string 'path' into point primitives
  appended at mvg_info->offset.  Supported commands: A/a (elliptical arc),
  C/c (cubic Bezier), H/h (horizontal line), L/l (line), M/m (move to),
  Q/q (quadratic Bezier), S/s (smooth cubic), T/t (smooth quadratic),
  V/v (vertical line), Z/z (close path).  Uppercase commands take absolute
  coordinates, lowercase relative ones.  Returns the total number of
  coordinates generated, or -1 on error.
*/
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  PrimitiveInfo
    *q;

  ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  /* subpath_offset marks where the current subpath began so its header
     record can be patched once the subpath's length is known */
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    /* status is set MagickFalse by the Throw...Exception macros —
       presumably; verify against their definitions */
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.
        */
        do
        {
          /* parse: rx,ry rotation large-arc-flag sweep-flag x,y */
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          /* lowercase command: endpoint is relative to current point */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          /* parse two control points and the endpoint */
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          /* seed the 4 control points, then expand into segments */
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* horizontal line: only x changes */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to.
        */
        /* a move terminates the current subpath: patch its header with
           the number of coordinates it accumulated */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* the first coordinate pair is the subpath start (used by Z) */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          /* parse the control point and the endpoint */
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          /* smooth variant: first control point reflects the previous
             curve's second control point about the current point */
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* per SVG: no reflection when the previous command was not a
             cubic — the control point collapses to the current point */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          /* smooth variant: reflect the previous control point */
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* per SVG: no reflection when the previous command was not a
             quadratic */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Line to.
        */
        /* vertical line: only y changes */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path.
        */
        /* return to the subpath start and mark the subpath closed */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unrecognized path command */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  /* close out the final (possibly unterminated) subpath */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* stamp the primitive type over every generated coordinate; multiple
     closed subpaths switch the fill method to cope with holes */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;  /* NOTE(review): q is not read after this assignment */
  return((ssize_t) number_coordinates);
}
/*
  TraceRectangle() emits a closed 4-sided polygon spanning the diagonal from
  'start' to 'end'.  Five points are generated: the first vertex is repeated
  at the end to close the subpath.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    corners[5];

  PrimitiveInfo
    *q;

  ssize_t
    i;

  /* vertices in drawing order; repeating 'start' closes the rectangle */
  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  q=primitive_info;
  for (i=0; i < 5; i++)
  {
    if (TracePoint(q,corners[i]) == MagickFalse)
      return(MagickFalse);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* walk q back over the generated points, stamping the primitive type */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
/*
  TraceRoundRectangle() traces a rectangle with rounded corners spanning
  'start'..'end'; 'arc' gives the corner radii (clamped to half the
  rectangle's dimensions).  Each corner is a quarter-ellipse produced by
  TraceEllipse(); a final point closes the shape.  Returns MagickFalse when
  any sub-trace or extent check fails.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  ssize_t
    offset;

  /* remember where this primitive begins; the corner traces advance
     mvg_info->offset and it is restored below */
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* degenerate rectangle: emit an empty primitive, report success */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* clamp corner radii so opposing corners cannot overlap */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* top-right corner: 270..360 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-right corner: 0..90 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-left corner: 90..180 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* top-left corner: 180..270 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  /* repeat the very first vertex to close the outline */
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* restore the offset: the whole outline is one primitive record */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* walk p back over the generated points, stamping the primitive type */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceSquareLinecap() extends both terminal vertices of an open stroked
  path outward by 'offset' along the direction of the path's end segments,
  producing square line caps.  'number_vertices' is the vertex count of
  'primitive_info'.  Always returns MagickTrue.

  Fix: the original divided by 'distance' without a guard; for a degenerate
  path whose vertices all coincide, distance is 0.0 and the division
  produced Inf/NaN coordinates.  PerceptibleReciprocal() (used elsewhere in
  this file) yields a finite reciprocal in that case, matching upstream
  ImageMagick.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  double
    dx,
    dy;

  ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /* find the first vertex measurably distinct from vertex 0 */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  /* extend the leading endpoint; PerceptibleReciprocal() avoids a
     division by zero on all-coincident paths */
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)*PerceptibleReciprocal(distance));
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)*PerceptibleReciprocal(distance));
  /* find the last vertex measurably distinct from the final vertex */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  /* extend the trailing endpoint symmetrically */
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)*PerceptibleReciprocal(distance));
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)*PerceptibleReciprocal(distance));
  return(MagickTrue);
}
/*
  TraceStrokePolygon() converts the path in 'primitive_info' into a filled
  polygon outlining its stroke, honoring the stroke width, line cap and line
  join settings in 'draw_info'.  Two point buffers are built in parallel —
  stroke_p for one side of the stroke and stroke_q for the other — and then
  concatenated (q-side reversed) into the returned polygon.  The caller owns
  the returned PrimitiveInfo array; NULL is returned on allocation failure
  (with an exception recorded).
*/
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
/*
  CheckPathExtent grows stroke_p/stroke_q so that at least pad_p/pad_q more
  points fit; on overflow or allocation failure it releases every buffer,
  records an exception, and returns NULL from the enclosing function.
*/
#define MaxStrokePad  (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{   \
  if ((pad_p) > MaxBezierCoordinates) \
    stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
  else \
    if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
      { \
        if (~extent_p < (pad_p)) \
          stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
        else \
          { \
            extent_p+=(pad_p); \
            stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
              MaxStrokePad,sizeof(*stroke_p)); \
          } \
      } \
  if ((pad_q) > MaxBezierCoordinates) \
    stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
  else \
    if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
      { \
        if (~extent_q < (pad_q)) \
          stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
        else \
          { \
            extent_q+=(pad_q); \
            stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
              MaxStrokePad,sizeof(*stroke_q)); \
          } \
      } \
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
    { \
      if (stroke_p != (PointInfo *) NULL) \
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
      if (stroke_q != (PointInfo *) NULL) \
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
      polygon_primitive=(PrimitiveInfo *) \
        RelinquishMagickMemory(polygon_primitive); \
      (void) ThrowMagickException(exception,GetMagickModule(), \
        ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \
      return((PrimitiveInfo *) NULL); \
    } \
}

  /* a StrokeSegment pairs a value for the previous segment (p) with the
     same value for the current segment (q) */
  typedef struct _StrokeSegment
  {
    double
      p,
      q;
  } StrokeSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *stroke_p,
    *stroke_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  ssize_t
    i;

  size_t
    arc_segments,
    extent_p,
    extent_q,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  StrokeSegment
    dx = {0.0, 0.0},
    dy = {0.0, 0.0},
    inverse_slope = {0.0, 0.0},
    slope = {0.0, 0.0},
    theta = {0.0, 0.0};

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PrimitiveInfo *) NULL);
    }
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  /* a path is closed when its first and last vertices coincide */
  offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
  offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
  closed_path=(fabs(offset.x) < MagickEpsilon) &&
    (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
  /* for round/miter joins on a closed path, wrap the second vertex so the
     closing join is computed like any interior join */
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  /* skip leading coincident vertices to find the first real segment */
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  /* parallel point buffers for the two sides of the stroke */
  extent_p=2*number_vertices;
  extent_q=2*number_vertices;
  stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
    sizeof(*stroke_p));
  stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
    sizeof(*stroke_q));
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
    {
      if (stroke_p != (PointInfo *) NULL)
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
      if (stroke_q != (PointInfo *) NULL)
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
      polygon_primitive=(PrimitiveInfo *)
        RelinquishMagickMemory(polygon_primitive);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PrimitiveInfo *) NULL);
    }
  /* slope of the first segment; near-vertical/horizontal segments are
     saturated to +/-1/MagickEpsilon instead of dividing by ~0 */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid is half the stroke width in device space; miterlimit is compared
     against squared distances below */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* offset is the perpendicular displacement of half the stroke width */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  stroke_q[p++]=box_q[0];
  stroke_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* skip segments shorter than half a pixel */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_*[4] is the intersection of the offset lines of the two
       adjacent segments (the miter point); parallel segments reuse the
       shared endpoint */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(MaxStrokePad,MaxStrokePad);
    /* sign of the cross product selects which side is the convex
       (outside) side of the turn */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_q[q++]=box_q[1];
          stroke_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          /* fall back to a bevel when the miter exceeds the limit */
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          /* approximate the round join with an arc of short segments
             around the shared vertex */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.
            q-theta.p)/(2.0*sqrt(PerceptibleReciprocal(mid))))));
          CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
          stroke_q[q].x=box_q[1].x;
          stroke_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          stroke_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* mirror image of the cases above: the turn bends the other way,
         so the roles of the p- and q-side buffers swap */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_p[p++]=box_p[1];
          stroke_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p-
            theta.q)/(2.0*sqrt((double) (PerceptibleReciprocal(mid)))))));
          CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
          stroke_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          stroke_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* advance: the current segment becomes the previous one */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  stroke_p[p++]=box_p[1];
  stroke_q[q++]=box_q[1];
  /*
    Trace stroked polygon.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
      stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return(stroke_polygon);
    }
  /* concatenate: p-side forward, then q-side reversed, closing with the
     first point; extra points are inserted for closed paths */
  for (i=0; i < (ssize_t) p; i++)
  {
    stroke_polygon[i]=polygon_primitive[0];
    stroke_polygon[i].point=stroke_p[i];
  }
  if (closed_path != MagickFalse)
    {
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
    }
  for ( ; i < (ssize_t) (p+q+closed_path); i++)
  {
    stroke_polygon[i]=polygon_primitive[0];
    stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
  }
  if (closed_path != MagickFalse)
    {
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
      i++;
    }
  stroke_polygon[i]=polygon_primitive[0];
  stroke_polygon[i].point=stroke_polygon[0].point;
  i++;
  stroke_polygon[i].primitive=UndefinedPrimitive;
  stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
  stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
  stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
convolution_sgemm_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// GEMM micro-kernel for im2col-based convolution on MIPS MSA.
// Input is channel-packed 4 floats wide (elempack = 4); output is unpacked
// (elempack = 1), hence "pack4to1".
//
// bottom_im2col : im2col'd input, w = size (= outw*outh), h = maxk, c = inch,
//                 16-byte elements (4 packed floats)
// top_blob      : output feature maps, one float per element
// kernel        : weights pre-interleaved by
//                 convolution_im2col_sgemm_transform_kernel_pack4to1_msa()
// _bias         : optional per-output-channel bias (may be empty)
static void im2col_sgemm_pack4to1_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const float* bias = _bias;

    // Repack the im2col matrix into column tiles of width 12 / 8 / 4 / 1 so
    // the GEMM inner loops below stream contiguous memory. The channel count
    // of 'tmp' is the total number of tiles produced by that decomposition.
    Mat tmp;
    if (size >= 12)
        tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + size % 12 % 4, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 4u * 4, 4, opt.workspace_allocator);
    {
        int remain_size_start = 0;
        int nn_size = size / 12;

        // ---- pack 12-wide tiles: transpose each 4x12 panel so one packed
        // group of 4 input channels becomes four 12-wide rows ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 12;

            float* tmpptr = tmp.channel(i / 12);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x12
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
                    v4f32 _r8 = (v4f32)__msa_ld_w(img0 + 4 * 8, 0);
                    v4f32 _r9 = (v4f32)__msa_ld_w(img0 + 4 * 9, 0);
                    v4f32 _ra = (v4f32)__msa_ld_w(img0 + 4 * 10, 0);
                    v4f32 _rb = (v4f32)__msa_ld_w(img0 + 4 * 11, 0);

                    // interleave word pairs, then doubleword pairs -> rows
                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
                    v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
                    v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);

                    // store the 4 transposed 12-wide rows back-to-back
                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
                    __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
                    __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);

                    img0 += size * 4; // advance to the next im2col row (k) of this channel
                    tmpptr += 48;
                }
            }
        }

        remain_size_start += nn_size * 12;
        nn_size = (size - remain_size_start) >> 3;

        // ---- pack 8-wide tiles (same scheme, 4x8 transpose) ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;

            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x8
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);

                    img0 += size * 4;
                    tmpptr += 32;
                }
            }
        }

        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;

        // ---- pack 4-wide tiles (4x4 transpose) ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x4
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);

                    img0 += size * 4;
                    tmpptr += 16;
                }
            }
        }

        remain_size_start += nn_size << 2;

        // ---- leftover single columns: copied as-is (no transpose needed) ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    v4f32 _val = (v4f32)__msa_ld_w(img0, 0);
                    __msa_st_w((v4i32)_val, tmpptr, 0);

                    img0 += size * 4;
                    tmpptr += 4;
                }
            }
        }
    }

    // ---- GEMM: process output channels 4 at a time, then the remainder ----
    int nn_outch = outch / 4;
    int remain_outch_start = nn_outch * 4;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        float* outptr2 = top_blob.channel(p + 2);
        float* outptr3 = top_blob.channel(p + 3);

        const float zeros[4] = {0.f};
        const float* biasptr = bias ? bias + p : zeros;

        int i = 0;
        // 12 output pixels x 4 output channels per iteration
        for (; i + 11 < size; i += 12)
        {
            const float* tmpptr = tmp.channel(i / 12);
            const float* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk * 4; // inch always > 0

            // _bias lanes 0..3 hold the biases for channels p..p+3;
            // splati broadcasts one lane across a vector of 4 pixels
            v4i32 _bias = __msa_ld_w(biasptr, 0);
            v4f32 _sum0 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum1 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum2 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum3 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum4 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum5 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum6 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum7 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum8 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum9 = (v4f32)__msa_splati_w(_bias, 3);
            v4f32 _suma = (v4f32)__msa_splati_w(_bias, 3);
            v4f32 _sumb = (v4f32)__msa_splati_w(_bias, 3);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 96);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4f32 _val2 = (v4f32)__msa_ld_w(tmpptr + 8, 0);
                v4i32 _w0123 = __msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val1, (v4f32)__msa_splati_w(_w0123, 0));
                _sum2 = __msa_fmadd_w(_sum2, _val2, (v4f32)__msa_splati_w(_w0123, 0));
                _sum3 = __msa_fmadd_w(_sum3, _val0, (v4f32)__msa_splati_w(_w0123, 1));
                _sum4 = __msa_fmadd_w(_sum4, _val1, (v4f32)__msa_splati_w(_w0123, 1));
                _sum5 = __msa_fmadd_w(_sum5, _val2, (v4f32)__msa_splati_w(_w0123, 1));
                _sum6 = __msa_fmadd_w(_sum6, _val0, (v4f32)__msa_splati_w(_w0123, 2));
                _sum7 = __msa_fmadd_w(_sum7, _val1, (v4f32)__msa_splati_w(_w0123, 2));
                _sum8 = __msa_fmadd_w(_sum8, _val2, (v4f32)__msa_splati_w(_w0123, 2));
                _sum9 = __msa_fmadd_w(_sum9, _val0, (v4f32)__msa_splati_w(_w0123, 3));
                _suma = __msa_fmadd_w(_suma, _val1, (v4f32)__msa_splati_w(_w0123, 3));
                _sumb = __msa_fmadd_w(_sumb, _val2, (v4f32)__msa_splati_w(_w0123, 3));

                tmpptr += 12;
                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr0 + 8, 0);
            __msa_st_w((v4i32)_sum3, outptr1, 0);
            __msa_st_w((v4i32)_sum4, outptr1 + 4, 0);
            __msa_st_w((v4i32)_sum5, outptr1 + 8, 0);
            __msa_st_w((v4i32)_sum6, outptr2, 0);
            __msa_st_w((v4i32)_sum7, outptr2 + 4, 0);
            __msa_st_w((v4i32)_sum8, outptr2 + 8, 0);
            __msa_st_w((v4i32)_sum9, outptr3, 0);
            __msa_st_w((v4i32)_suma, outptr3 + 4, 0);
            __msa_st_w((v4i32)_sumb, outptr3 + 8, 0);

            outptr0 += 12;
            outptr1 += 12;
            outptr2 += 12;
            outptr3 += 12;
        }
        // 8 output pixels x 4 output channels per iteration
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
            const float* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4i32 _bias = __msa_ld_w(biasptr, 0);
            v4f32 _sum0 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum1 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum2 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum3 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum4 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum5 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum6 = (v4f32)__msa_splati_w(_bias, 3);
            v4f32 _sum7 = (v4f32)__msa_splati_w(_bias, 3);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 64);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4i32 _w0123 = __msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val1, (v4f32)__msa_splati_w(_w0123, 0));
                _sum2 = __msa_fmadd_w(_sum2, _val0, (v4f32)__msa_splati_w(_w0123, 1));
                _sum3 = __msa_fmadd_w(_sum3, _val1, (v4f32)__msa_splati_w(_w0123, 1));
                _sum4 = __msa_fmadd_w(_sum4, _val0, (v4f32)__msa_splati_w(_w0123, 2));
                _sum5 = __msa_fmadd_w(_sum5, _val1, (v4f32)__msa_splati_w(_w0123, 2));
                _sum6 = __msa_fmadd_w(_sum6, _val0, (v4f32)__msa_splati_w(_w0123, 3));
                _sum7 = __msa_fmadd_w(_sum7, _val1, (v4f32)__msa_splati_w(_w0123, 3));

                tmpptr += 8;
                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr1, 0);
            __msa_st_w((v4i32)_sum3, outptr1 + 4, 0);
            __msa_st_w((v4i32)_sum4, outptr2, 0);
            __msa_st_w((v4i32)_sum5, outptr2 + 4, 0);
            __msa_st_w((v4i32)_sum6, outptr3, 0);
            __msa_st_w((v4i32)_sum7, outptr3 + 4, 0);

            outptr0 += 8;
            outptr1 += 8;
            outptr2 += 8;
            outptr3 += 8;
        }
        // 4 output pixels x 4 output channels per iteration
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
            const float* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4i32 _bias = __msa_ld_w(biasptr, 0);
            v4f32 _sum0 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum1 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum2 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum3 = (v4f32)__msa_splati_w(_bias, 3);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 32);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4i32 _w0123 = __msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val0, (v4f32)__msa_splati_w(_w0123, 1));
                _sum2 = __msa_fmadd_w(_sum2, _val0, (v4f32)__msa_splati_w(_w0123, 2));
                _sum3 = __msa_fmadd_w(_sum3, _val0, (v4f32)__msa_splati_w(_w0123, 3));

                tmpptr += 4;
                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr1, 0);
            __msa_st_w((v4i32)_sum2, outptr2, 0);
            __msa_st_w((v4i32)_sum3, outptr3, 0);

            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
        }
        // 1 output pixel x 4 output channels per iteration: one vector lane
        // per output channel, scalar input broadcast across lanes
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
            const float* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum = (v4f32)__msa_ld_w(biasptr, 0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 8);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = __msa_fill_w_f32(*tmpptr++);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum = __msa_fmadd_w(_sum, _val0, _w0);

                kptr0 += 4;
            }

            outptr0[0] = _sum[0];
            outptr1[0] = _sum[1];
            outptr2[0] = _sum[2];
            outptr3[0] = _sum[3];

            outptr0 += 1;
            outptr1 += 1;
            outptr2 += 1;
            outptr3 += 1;
        }
    }

    // ---- remaining output channels, one at a time ----
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        int i = 0;
        for (; i + 11 < size; i += 12)
        {
            const float* tmpptr = tmp.channel(i / 12);
            // tail kernels are stored after the 4-grouped channels,
            // hence the p / 4 + p % 4 channel index
            const float* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = __msa_fill_w_f32(bias0);
            v4f32 _sum1 = __msa_fill_w_f32(bias0);
            v4f32 _sum2 = __msa_fill_w_f32(bias0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 64);
                __builtin_prefetch(kptr0 + 8);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4f32 _val2 = (v4f32)__msa_ld_w(tmpptr + 8, 0);
                v4f32 _w0 = __msa_fill_w_f32(*kptr0);
                _sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
                _sum1 = __msa_fmadd_w(_sum1, _w0, _val1);
                _sum2 = __msa_fmadd_w(_sum2, _w0, _val2);

                tmpptr += 12;
                kptr0 += 1;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr0 + 8, 0);

            outptr0 += 12;
        }
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = __msa_fill_w_f32(bias0);
            v4f32 _sum1 = __msa_fill_w_f32(bias0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 32);
                __builtin_prefetch(kptr0 + 8);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4f32 _w0 = __msa_fill_w_f32(*kptr0);
                _sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
                _sum1 = __msa_fmadd_w(_sum1, _w0, _val1);

                tmpptr += 8;
                kptr0 += 1;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);

            outptr0 += 8;
        }
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = __msa_fill_w_f32(bias0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr0 + 8);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _w0 = __msa_fill_w_f32(*kptr0);
                _sum0 = __msa_fmadd_w(_sum0, _w0, _val0);

                tmpptr += 4;
                kptr0 += 1;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);

            outptr0 += 4;
        }
        // single pixel tail: vector dot product folded with horizontal add
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk; // inch always > 0

            float sum0 = bias0;

            v4f32 _sum0 = (v4f32)__msa_fill_w(0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr0 + 16);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, _w0);

                tmpptr += 4;
                kptr0 += 4;
            }

            sum0 += __msa_fhadd_w(_sum0);

            outptr0[0] = sum0;

            outptr0 += 1;
        }
    }
}
static void convolution_im2col_sgemm_transform_kernel_pack4to1_msa(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = pb-pa-maxk-inch/pa-outch/pb
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(4 * 4 * maxk, inch / 4, outch / 4 + outch % 4);
int q = 0;
for (; q + 3 < outch; q += 4)
{
float* g00 = kernel_tm.channel(q / 4);
for (int p = 0; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const float* k00 = kernel.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
for (; q < outch; q++)
{
const Mat k0 = kernel.channel(q);
float* g00 = kernel_tm.channel(q / 4 + q % 4);
for (int p = 0; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 4; j++)
{
const float* k00 = k0.row(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
// Driver for im2col + sgemm convolution (pack4 input, pack1 output).
// Expands bottom_blob into an im2col matrix honoring stride/dilation, then
// delegates the matrix multiply to im2col_sgemm_pack4to1_msa().
// top_blob must already be allocated with the correct outw/outh/outch.
static void convolution_im2col_sgemm_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    // layout: w = size, h = maxk, c = inch, elempack 4 (16-byte elements)
    Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);
    {
        // packed-float distance from the end of one output row's reads to
        // the start of the next output row's reads
        const int gap = (w * stride_h - outw * stride_w) * 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            // one im2col row per kernel tap (u, v)
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v * 4;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            // copy one packed group of 4 floats
                            v4f32 _val = (v4f32)__msa_ld_w(sptr, 0);
                            __msa_st_w((v4i32)_val, ptr, 0);

                            sptr += stride_w * 4;
                            ptr += 4;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack4to1_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
mpi_mat_distribute.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "../splatt_mpi.h"
#include "../util.h"
/******************************************************************************
* PRIVATE DEFINES
*****************************************************************************/
static int const MSG_FINISHED = 0;
static int const MSG_TRYCLAIM = 1;
static int const MSG_MUSTCLAIM = 2;
static int const MSG_SENDBACK = 3;
static int const MSG_STANDBY = 4;
static int const MSG_UPDATES = 5;
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Computes a factor matrix distribution using a naive method. Each
* layer is defined as N / (p^0.3) slices is distributed in a contiguous
* fashion to all processes with nonzeros in that layer.
*
* @param rinfo The MPI rank information to fill in.
* @param tt The distributed tensor.
* @param perm The resulting permutation (set to identity).
*/
static void p_naive_mat_distribution(
  rank_info * const rinfo,
  sptensor_t const * const tt,
  permutation_t * const perm)
{
  /* For each mode, give every rank in the layer a contiguous chunk of
   * roughly equal size; the permutation is left as the identity. */
  for(idx_t m=0; m < tt->nmodes; ++m) {
    MPI_Comm const comm = rinfo->layer_comm[m];
    int nranks;
    int myrank;
    MPI_Comm_size(comm, &nranks);
    MPI_Comm_rank(comm, &myrank);

    /* number of slices owned by this layer */
    idx_t const nslices = rinfo->layer_ends[m] - rinfo->layer_starts[m];

    /* contiguous chunk of ~nslices/nranks rows per rank */
    idx_t const chunk = nslices / nranks;
    rinfo->mat_start[m] = myrank * chunk;
    /* the last rank in the layer absorbs the remainder */
    rinfo->mat_end[m] = (myrank == nranks - 1) ? nslices : (myrank + 1) * chunk;

    /* identity permutation for this mode */
    for(idx_t i=0; i < tt->dims[m]; ++i) {
      perm->perms[m][i]  = i;
      perm->iperms[m][i] = i;
    }
  }
}
/**
* @brief Generate a 'job' (an order to select rows for ownership) for a light
* rank in my lower. This function is the producer for the work queue
* and called by the root node in the layer.
*
* @param npes The number of ranks in the layer.
* @param lastp The most recent process to be given work.
* @param pvols An array of communication volumes.
* @param rinfo MPI rank information.
* @param comm The layer communicator.
* @param mustclaim A flag marking whether the last job was successful (whether
* any rows were claimed).
* @param left How many unclaimed rows are left.
*
* @return The selected rank.
*/
static int p_make_job(
    int const npes,
    int const lastp,
    idx_t * const pvols,
    rank_info * const rinfo,
    MPI_Comm const comm,
    int const mustclaim,
    idx_t const left)
{
  /* Pick the rank with the smallest communication volume to claim rows next.
   * p0/p1 start rotated past the last worker so ties don't starve ranks.
   * NOTE(review): p1 is only a running estimate of the second-smallest
   * volume, not an exact second minimum -- this is a greedy heuristic. */
  int p0 = (lastp+1) % npes;
  int p1 = (lastp+2) % npes;
  for(int p = 0; p < npes; ++p) {
    if(pvols[p] < pvols[p0]) {
      p1 = p0;
      p0 = p;
    }
  }

  /* ask p0 to claim enough rows to catch up to p1, bounded by what's left */
  rinfo->worksize = SS_MIN(pvols[p1] - pvols[p0], left);
  if(rinfo->worksize == 0) {
    /* volumes are tied; hand out an even share so progress is still made */
    rinfo->worksize = SS_MAX(left / npes, 1);
  }

  /* tell p0 whether it may fail to claim (TRYCLAIM) or must succeed
   * (MUSTCLAIM); all other ranks are told to stand by.
   * NOTE(review): rinfo->req is overwritten by each successive MPI_Isend
   * without an intervening MPI_Wait -- this leaks request handles and relies
   * on the small messages completing eagerly; confirm against MPI usage
   * elsewhere in the project. */
  if(!mustclaim) {
    MPI_Isend(&MSG_TRYCLAIM, 1, MPI_INT, p0, 0, comm, &(rinfo->req));
  } else {
    MPI_Isend(&MSG_MUSTCLAIM, 1, MPI_INT, p0, 0, comm, &(rinfo->req));
  }
  MPI_Isend(&(rinfo->worksize), 1, SPLATT_MPI_IDX, p0, 0, comm, &(rinfo->req));

  for(int p=0; p < npes; ++p) {
    if(p != p0) {
      MPI_Isend(&MSG_STANDBY, 1, MPI_INT, p, 0, comm, &(rinfo->req));
    }
  }

  return p0;
}
/**
* @brief Receive the latest claimed rows and update all other ranks.
*
* @param npes The number of ranks in my layer.
* @param pvols An array of process communication volumes.
* @param rinfo MPI rank information.
* @param comm The layer communicator.
* @param rowbuf A buffer to receive the claimed rows.
* @param left A pointer to update, we subtract the newly claimed rows from what
* is left.
*
* @return The number of rows that were claimed.
*/
static idx_t p_check_job(
    int const npes,
    idx_t * const pvols,
    rank_info * const rinfo,
    MPI_Comm const comm,
    idx_t * const rowbuf,
    idx_t * const left)
{
  /* block until the worker selected by p_make_job() reports back;
   * MPI_ANY_SOURCE lets us learn which rank that was from the status */
  MPI_Probe(MPI_ANY_SOURCE, 0, comm, &(rinfo->status));
  int const proc_up = rinfo->status.MPI_SOURCE;

  /* receive the claim count, then the claimed row ids themselves.
   * NOTE(review): rowbuf must be able to hold 'nclaimed' entries -- the
   * caller sizes it by the total number of contested rows; verify. */
  idx_t nclaimed;
  MPI_Recv(&nclaimed, 1, SPLATT_MPI_IDX, proc_up, 0, comm, &(rinfo->status));
  MPI_Recv(rowbuf, nclaimed, SPLATT_MPI_IDX, proc_up, 0, comm, &(rinfo->status));

  /* charge the new rows against that rank's communication volume */
  pvols[proc_up] += nclaimed;
  *left -= nclaimed;

  /* send new status message: FINISHED terminates the work loop in every
   * rank, UPDATES tells them to expect a broadcast of the new claims */
  for(int p=0; p < npes; ++p) {
    if(*left == 0) {
      MPI_Isend(&MSG_FINISHED, 1, MPI_INT, p, 0, comm, &(rinfo->req));
    } else {
      MPI_Isend(&MSG_UPDATES, 1, MPI_INT, p, 0, comm, &(rinfo->req));
    }
  }

  return nclaimed;
}
/**
* @brief Claim up to 'amt' rows that are unclaimed and found in my local
* tensor.
*
* @param amt The maximum (desired) rows to claim.
* @param inds Indices local to my tensor.
* @param localdim The size of 'inds'
* @param rinfo MPI rank information.
* @param claimed An array marking which rows have been claimed.
* @param layerdim The dimension of the layer.
* @param newclaims An array of newly claimed rows.
*
* @return The number of claimed rows.
*/
static idx_t p_tryclaim_rows(
    idx_t const amt,
    idx_t const * const inds,
    idx_t const localdim,
    rank_info const * const rinfo,
    char * const claimed,
    idx_t const layerdim,
    idx_t * const newclaims)
{
  idx_t count = 0;

  /* walk my local slice indices and grab any row nobody owns yet */
  for(idx_t i=0; i < localdim; ++i) {
    idx_t const row = inds[i];
    assert(row < layerdim);
    if(claimed[row] != 0) {
      continue;
    }

    claimed[row] = 1;
    newclaims[count] = row;
    ++count;
    if(count == amt) {
      return count;
    }
  }

  return count;
}
/**
* @brief Claim exactly 'amt' rows, first attempting to grab them from my local
* tensor.
*
* @param amt The number of rows I must claim.
* @param inds Indices local to my tensor.
* @param localdim The size of 'inds'.
* @param rinfo MPI rank information.
* @param claimed An array marking which rows have been claimed.
* @param layerdim The dimension of the layer.
* @param newclaims An array of newly claimed rows.
*
* @return The number of claimed rows.
*/
static idx_t p_mustclaim_rows(
    idx_t const amt,
    idx_t const * const inds,
    idx_t const localdim,
    rank_info const * const rinfo,
    char * const claimed,
    idx_t const layerdim,
    idx_t * const newclaims)
{
  /* prefer rows that actually appear in my local tensor */
  idx_t nclaimed = p_tryclaim_rows(amt, inds, localdim, rinfo, claimed,
      layerdim, newclaims);

  /* still short? sweep the whole layer and take the first unclaimed rows */
  for(idx_t row=0; nclaimed < amt && row < layerdim; ++row) {
    if(claimed[row] == 0) {
      claimed[row] = 1;
      newclaims[nclaimed++] = row;
    }
  }

  assert(nclaimed == amt);
  return nclaimed;
}
/* Greedily distribute the contested (>2 rank) rows of mode 'm'. The layer
 * root acts as the producer: it repeatedly picks the least-loaded rank
 * (p_make_job), that rank claims rows (try/must), and the root broadcasts
 * the new claims to everyone (p_check_job) until no contested rows remain. */
static void p_distribute_u3_rows(
    idx_t const m,
    int const * const pcount,
    idx_t * const pvols,
    idx_t const * const rconns,
    idx_t * const mine,
    idx_t * const nrows, /* # of rows claimed locally */
    idx_t const * const inds,
    idx_t const localdim,
    rank_info * const rinfo)
{
  MPI_Comm const comm = rinfo->layer_comm[m];
  MPI_Request req;
  int const rank = rinfo->layer_rank[m];
  int npes;
  MPI_Comm_size(comm, &npes);

  int msg;
  idx_t amt;
  /* rconns[2] = number of rows appearing in >2 ranks (still unowned) */
  idx_t left = rconns[2];

  idx_t const dim = rinfo->layer_ends[m] - rinfo->layer_starts[m];

  /* mark if row claimed[i] has been claimed */
  char * claimed = (char *) splatt_malloc(dim * sizeof(char));
  par_memset(claimed, 0, sizeof(char)*dim);
  /* a list of all rows I just claimed */
  idx_t * myclaims = (idx_t *) splatt_malloc(left * sizeof(idx_t));
  /* incoming new assignments */
  idx_t * bufclaims = (idx_t *) splatt_malloc(left * sizeof(idx_t));

  /* mark the rows already claimed */
  #pragma omp parallel for
  for(idx_t i=0; i < *nrows; ++i) {
    assert(mine[i] < dim);
    claimed[mine[i]] = 1;
  }

  /* Everyone gets a consistent set of claimed rows.
   * NOTE(review): MPI_SUM over MPI_CHAR is not sanctioned by the MPI
   * standard (char is not an arithmetic reduction type) -- MPI_SIGNED_CHAR
   * would be strictly conforming; confirm against the supported MPIs. The
   * sum works because rows claimed so far are owned by exactly one rank. */
  MPI_Allreduce(MPI_IN_PLACE, claimed, dim, MPI_CHAR, MPI_SUM, comm);

  for(idx_t i=0; i < dim; ++i) {
    assert(claimed[i] <= 1);
  }

  /* lets root know which process was chosen last for grabbing rows */
  int newp = 0;
  int mustclaim = 0;
  idx_t nclaimed = 0;
  while(1) {
    /* root: produce the next job and tell every rank its role */
    if(rank == 0) {
      newp = p_make_job(npes, newp, pvols, rinfo, comm, mustclaim, left);
      /*printf("pvols =");
      for(int p=0; p < npes; ++p) {
        printf("\t%"SPLATT_PF_IDX, pvols[p]);
      }
      printf("\n");*/
    }

    /* everyone: receive role (TRYCLAIM / MUSTCLAIM / STANDBY) */
    MPI_Recv(&msg, 1, MPI_INT, 0, 0, comm, &(rinfo->status));
    if(msg == MSG_TRYCLAIM || msg == MSG_MUSTCLAIM) {
      /* get target number of rows */
      MPI_Recv(&amt, 1, SPLATT_MPI_IDX, 0, 0, comm, &(rinfo->status));

      /* see how many I can claim */
      if(msg == MSG_TRYCLAIM) {
        nclaimed = p_tryclaim_rows(amt, inds, localdim, rinfo, claimed, dim,
            myclaims);
      } else {
        nclaimed = p_mustclaim_rows(amt, inds, localdim, rinfo, claimed, dim,
            myclaims);
      }

      /* send new claims to root process */
      MPI_Isend(&nclaimed, 1, SPLATT_MPI_IDX, 0, 0, comm, &req);
      MPI_Isend(myclaims, nclaimed, SPLATT_MPI_IDX, 0, 0, comm, &req);

      /* now mark as mine */
      for(idx_t i=0; i < nclaimed; ++i) {
        mine[(*nrows)++] = myclaims[i];
      }
    }

    /* check for updated rows, completion, etc. */
    if(rank == 0) {
      amt = p_check_job(npes, pvols, rinfo, comm, bufclaims, &left);
      /* force claim next turn if no progress made this time */
      mustclaim = amt == 0;
      /*if (mustclaim) {
        printf("mustclaim\n");
      }
      else {
        printf("tryclaim\n");
      }*/
    }

    /* everyone: receive round result (UPDATES or FINISHED) */
    MPI_Recv(&msg, 1, MPI_INT, 0, 0, comm, &(rinfo->status));
    if(msg == MSG_UPDATES) {
      /* get new rows */
      MPI_Bcast(&amt, 1, SPLATT_MPI_IDX, 0, comm);
      MPI_Bcast(bufclaims, amt, SPLATT_MPI_IDX, 0, comm);

      /* mark as claimed */
      #pragma omp parallel for
      for(idx_t i=0; i < amt; ++i) {
        claimed[bufclaims[i]] = 1;
      }
    } else if(msg == MSG_FINISHED) {
      break;
    }
  }

  free(bufclaims);
  free(myclaims);
  free(claimed);
  MPI_Barrier(comm);
}
/**
* @brief Fill communication volume statistics (connectivity factor rows) and
* store in rconns.
*
* @param m The mode to operate on.
* @param pcount An array of size 'ldim' which stores the count of how many
* ranks have a nonzero in this slice.
* @param ldim The size (number of slices) of my layer.
* @param rinfo MPI rank information.
* @param rconns Row connectivity information. rconns[0] stores the number of
* rows that only appear in 1 rank, rconns[1] stores the number of
* rows that appear in 2 ranks, and rconns[2] stores the number of
* rows that appear in >2 ranks.
*/
/* Fill row-connectivity statistics for mode 'm' into rconns:
 *   rconns[0] = # of rows appearing in exactly 1 rank,
 *   rconns[1] = unused (always set to 0; 2-rank rows are folded into [2]),
 *   rconns[2] = # of rows appearing in >=2 ranks.
 *
 * pcount[i] holds, for each of the 'ldim' slices in the layer, how many
 * ranks have a nonzero in slice i (already Allreduce'd by the caller).
 * 'm' and 'rinfo' are only consulted by the DEBUG sanity check.
 *
 * Fix: the original accumulated a shared local ('tot') inside the
 * OpenMP-parallel loop without listing it in the reduction clause -- a data
 * race. The value was never read, so the accumulator is removed entirely. */
static void p_fill_volume_stats(
    idx_t const m,
    int const * const pcount,
    idx_t const ldim,
    rank_info const * const rinfo,
    idx_t * const rconns)
{
  /* count u=1; u=2, u > 2 */
  idx_t rconns0 = 0;
  idx_t rconns2 = 0;
  #pragma omp parallel for reduction(+:rconns0,rconns2)
  for(idx_t i=0; i < ldim; ++i) {
#ifdef DEBUG
    /* no slice can be touched by more ranks than exist in the other modes */
    idx_t maxp = 1;
    for(idx_t moff=0; moff < rinfo->nmodes; ++moff) {
      if(moff != m) {
        maxp *= rinfo->dims_3d[moff];
      }
    }
    assert(pcount[i] <= maxp);
#endif

    switch(pcount[i]) {
    case 0:
      /* this only happens with empty slices */
      break;
    case 1:
      rconns0 += 1;
      break;
    case 2:
      /* 2-rank rows are intentionally lumped with >2-rank rows */
      /* fall through */
    default:
      rconns2 += 1;
      break;
    }
  }

  rconns[0] = rconns0;
  rconns[1] = 0; /* never populated; zero it so callers don't read garbage */
  rconns[2] = rconns2;
}
/**
* @brief Computes a factor matrix distribution using a greedy method. Each rank
* claims all rows found only in its own partition and contested rows are
* given in a greedy manner which attempts to minimize total volume.
*
* NOTE: Since ranks can end up with non-contiguous partitions we reorder
* the tensor after distribution to have nice contiguous blocks of the
* factor matrices! tt->indmap will be updated accordingly.
*
* @param rinfo The MPI rank information to fill in.
* @param tt The distributed tensor which MAY be reordered.
*/
static void p_greedy_mat_distribution(
    rank_info * const rinfo,
    sptensor_t const * const tt,
    permutation_t * const perm)
{
  /* Size the scratch buffers by the largest layer dimension over all modes. */
  idx_t max_dim = 0;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    idx_t const dsize = rinfo->layer_ends[m] - rinfo->layer_starts[m];
    if(dsize > max_dim) {
      max_dim = dsize;
    }
  }

  /* count of appearances for each idx across all ranks */
  idx_t rconns[3];
  int   * pcount = (int *)   splatt_malloc(max_dim * sizeof(int));
  idx_t * mine   = (idx_t *) splatt_malloc(max_dim * sizeof(idx_t));

  for(idx_t m=0; m < tt->nmodes; ++m) {
    /* layer dimensions -- NOTE(review): assumes tt->dims[m] never exceeds
     * max_dim (i.e. local dims fit in the layer extent); confirm upstream. */
    idx_t const layerdim = tt->dims[m];

    /* get local idxs */
    idx_t localdim;
    idx_t * inds = tt_get_slices(tt, m, &localdim);

    par_memset(pcount, 0, layerdim * sizeof(int));
    par_memset(mine, 0, layerdim * sizeof(idx_t));

    /* mark all idxs that are local to me */
    #pragma omp parallel for
    for(idx_t i=0; i < localdim; ++i) {
      pcount[inds[i]] = 1;
    }

    /* sum appearances to get communication volume */
    MPI_Allreduce(MPI_IN_PLACE, pcount, layerdim, MPI_INT, MPI_SUM,
        rinfo->layer_comm[m]);

    /* communication volume */
    idx_t myvol = 0;
    /* number of rows I own */
    idx_t nrows = 0;

    /* claim all rows that are entirely local to me; rows seen by several
     * ranks only add to my communication volume for now */
    for(idx_t i=0; i < localdim; ++i) {
      switch(pcount[inds[i]]) {
      case 0:
        break;
      case 1:
        mine[nrows++] = inds[i];
        break;
      default:
        ++myvol;
        break;
      }
    }

    /* Size of layer and per-rank volumes. Declared here instead of at
     * function scope: the old outer 'int lnpes;' was shadowed by this
     * declaration and never used, and 'pvols' lives only for one mode. */
    int const lnpes = rinfo->layer_size[m];
    idx_t * pvols = (idx_t *) splatt_malloc(lnpes * sizeof(idx_t));

    /* root process gathers all communication volumes */
    MPI_Gather(&myvol, 1, SPLATT_MPI_IDX, pvols, 1, SPLATT_MPI_IDX,
        0, rinfo->layer_comm[m]);

    /* now distribute rows with >=3 pcount in a greedy fashion */
    p_fill_volume_stats(m, pcount, layerdim, rinfo, rconns);
    p_distribute_u3_rows(m, pcount, pvols, rconns, mine, &nrows,
        inds, localdim, rinfo);

    /* prefix sum to get our new mat_start */
    idx_t rowoffset;
    MPI_Scan(&nrows, &rowoffset, 1, SPLATT_MPI_IDX, MPI_SUM, rinfo->layer_comm[m]);
    rowoffset -= nrows;

    /* assign new labels - IPERM is easier to fill first.
     * newlabels[newindex] = oldindex */
    idx_t * const newlabels = perm->iperms[m];
    idx_t * const inewlabels = perm->perms[m];
    par_memset(newlabels, 0, layerdim * sizeof(idx_t));
    #pragma omp parallel for
    for(idx_t i=0; i < nrows; ++i) {
      assert(rowoffset+i < layerdim);
      assert(mine[i] < layerdim);
      newlabels[rowoffset+i] = mine[i];
    }
    /* all other slots are zero, so a SUM reduce merges every rank's labels */
    MPI_Allreduce(MPI_IN_PLACE, newlabels, layerdim, SPLATT_MPI_IDX, MPI_SUM,
        rinfo->layer_comm[m]);

    /* fill perm: inewlabels[oldlayerindex] = newlayerindex */
    #pragma omp parallel for
    for(idx_t i=0; i < layerdim; ++i) {
      assert(newlabels[i] < layerdim);
      inewlabels[newlabels[i]] = i;
    }

    /* store matrix info */
    rinfo->mat_start[m] = rowoffset;
    rinfo->mat_end[m] = SS_MIN(rinfo->mat_start[m] + nrows, layerdim);

    free(inds);
    free(pvols);

    MPI_Barrier(rinfo->layer_comm[m]);
  } /* foreach mode */

  free(pcount);
  free(mine);
}
/**
* @brief Allocate + fill mat_ptrs, an array marking the start index for each
* rank. Indices are local to the layer. NOTE: This is only designed for
* 3D decomposition!
*
* @param rinfo The structure containing MPI information.
*/
/* Fill rinfo->mat_ptrs[mode]: per-rank row-start offsets, local to this
 * layer, with slot npes holding the layer extent (one-past-the-end). */
static void p_setup_mat_ptrs(
idx_t const mode,
MPI_Comm const comm,
rank_info * const rinfo)
{
/* number of procs in layer */
int npes;
int rank;
MPI_Comm_size(comm, &npes);
MPI_Comm_rank(comm, &rank);
/* allocate space for start/end idxs */
/* NOTE(review): calloc result is not checked for NULL */
rinfo->mat_ptrs[mode] = (idx_t *) calloc(npes + 1, sizeof(idx_t));
idx_t * const mat_ptrs = rinfo->mat_ptrs[mode];
/* each rank contributes only its own start; calloc zeroed the rest */
mat_ptrs[rank] = rinfo->mat_start[mode];
mat_ptrs[npes] = rinfo->layer_ends[mode] - rinfo->layer_starts[mode];
/* Doing a reduce instead of a gather lets us set location mode_rank
* instead of the rank in this communicator */
/* (only the first npes slots are reduced; slot npes stays as set above) */
MPI_Allreduce(MPI_IN_PLACE, mat_ptrs, npes, SPLATT_MPI_IDX, MPI_SUM, comm);
assert(rinfo->mat_ptrs[mode][rank ] == rinfo->mat_start[mode]);
assert(rinfo->mat_ptrs[mode][rank + 1] == rinfo->mat_end[mode]);
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* Compute the factor-matrix row distribution for the chosen decomposition.
 * Returns the permutation applied to the tensor (identity for the
 * coarse-grained case; the greedy distribution may reorder tt). */
permutation_t * mpi_distribute_mats(
rank_info * const rinfo,
sptensor_t * const tt,
splatt_decomp_type const distribution)
{
permutation_t * perm = perm_identity(tt->dims, tt->nmodes);
switch(distribution) {
case SPLATT_DECOMP_COARSE:
/* assign simple 1D matrix distribution */
for(idx_t m=0; m < tt->nmodes; ++m) {
/* allocate space for start/end idxs */
rinfo->mat_ptrs[m] = (idx_t *) calloc(rinfo->npes + 1, sizeof(idx_t));
/* each rank fills only its own slot; slot npes holds the global dim */
rinfo->mat_ptrs[m][rinfo->rank] = rinfo->mat_start[m];
rinfo->mat_ptrs[m][rinfo->npes] = rinfo->global_dims[m];
/* Doing a reduce instead of a gather lets us set location mode_rank
* instead of the rank in this communicator */
MPI_Allreduce(MPI_IN_PLACE, rinfo->mat_ptrs[m], rinfo->npes, SPLATT_MPI_IDX,
MPI_SUM, MPI_COMM_WORLD);
assert(rinfo->mat_ptrs[m][rinfo->rank ] == rinfo->mat_start[m]);
assert(rinfo->mat_ptrs[m][rinfo->rank + 1] == rinfo->mat_end[m]);
}
break;
case SPLATT_DECOMP_FINE:
case SPLATT_DECOMP_MEDIUM:
/* the greedy distribution computes a permutation, which must be applied
* to the tensor before the per-rank row pointers are gathered */
p_greedy_mat_distribution(rinfo, tt, perm);
perm_apply(tt, perm->perms);
for(idx_t m=0; m < tt->nmodes; ++m) {
p_setup_mat_ptrs(m, rinfo->layer_comm[m], rinfo);
}
break;
}
return perm;
}
/* Locate the contiguous run of local rows of mode 'mode' whose global ids
 * fall inside this rank's owned range [mat_start, mat_end). Fills
 * rinfo->nowned / ownstart / ownend (ownend is one past the last owned row;
 * both are 0 when nothing is owned). */
void mpi_find_owned(
sptensor_t const * const tt,
idx_t const mode,
rank_info * const rinfo)
{
  idx_t const m = mode;
  idx_t const owned_begin = rinfo->mat_start[m];
  idx_t const owned_end   = rinfo->mat_end[m];
  idx_t const * const indmap = tt->indmap[m];

  idx_t num_owned = 0;
  idx_t first = tt->dims[m];
  idx_t last  = 0;

  /* scan local rows; indmap (when present) maps local -> global ids */
  for(idx_t i=0; i < tt->dims[m]; ++i) {
    idx_t const global_id = (indmap == NULL) ? i : indmap[i];
    if(global_id < owned_begin || global_id >= owned_end) {
      continue;
    }
    ++num_owned;
    first = SS_MIN(first, i);
    last  = SS_MAX(last, i);
  }

  rinfo->nowned[m] = num_owned;
  if(num_owned == 0) {
    rinfo->ownstart[m] = 0;
    rinfo->ownend[m]   = 0;
  } else {
    rinfo->ownstart[m] = first;
    rinfo->ownend[m]   = last + 1;
  }

  /* sanity check to ensure owned rows are contiguous */
  if(indmap != NULL) {
    for(idx_t i=rinfo->ownstart[m]+1; i < rinfo->ownend[m]; ++i) {
      assert(indmap[i] >= owned_begin && indmap[i] < owned_end);
      assert(indmap[i] == indmap[i-1]+1);
    }
  }
}
|
#pragma once
#include "wots/ClassicWots.h"
#include <math.h>
#include <gmpxx.h>
#include <algorithm>
#include <iostream>
/*
 * WOTS variant whose fingerprint blocks always sum to the constant S,
 * removing the need for checksum chains (t2() == 0).
 *   D: digest primitive   W: maximum block value (chain length)
 *   T: number of blocks   S: the constant block sum
 */
template <class D, int W, int T, int S>
class ConstantSumWots : public virtual ClassicWots<D,W> {
public:
ConstantSumWots() noexcept {};
ConstantSumWots(const ByteArray& seed) noexcept : ClassicWots<D,W>(seed) {};
// All T chains carry message blocks; there are no checksum chains.
const unsigned int t1() const noexcept final {return T;};
const unsigned int t2() const noexcept final {return 0;};
const std::vector<unsigned int> checksum(std::vector<unsigned int>& blocks) final {
//Return dummy, there is no need for checksum.
std::vector<unsigned int> dummy;
return dummy;
};
// Sign: for each fingerprint block b_i, release chain i advanced W - b_i
// steps from the secret value.
virtual const std::vector<ByteArray> sign(ByteArray& data) {
std::vector<unsigned int> blocks = this->genFingerprint(data);
std::vector<ByteArray> signature(blocks.size());
//#pragma omp parallel for
for(long unsigned int i = 0; i < blocks.size(); i++){
signature[i] = this->digestChain(this->private_key[i], W - blocks[i]);
}
return signature;
};
// Verify: advancing each signature element the remaining b_i steps must
// reproduce the public key digest.
virtual bool verify(ByteArray& data, std::vector<ByteArray>& signature) {
if(not this->pubKeyIsLoaded())
return false;
std::vector<unsigned int> blocks = this->genFingerprint(data);
ByteArray check;
//#pragma omp parallel for
for(long unsigned int i = 0; i < blocks.size(); i++) {
check += this->digestChain(signature[i], blocks[i]);
}
check = this->digest(check);
//TODO( We can improve this using xor and iterator)
if( std::to_string(this->public_key).compare(std::to_string(check)) == 0 )
return true;
return false;
};
// Fingerprint: interpret digest(data) as a hex integer and unrank it into
// a T-block vector with entries in [0, W] that sum to S.
virtual std::vector<unsigned int> genFingerprint(ByteArray& data) {
ByteArray aux = this->digest(data);
mpz_class i;
i.set_str(std::to_string(aux), 16);
std::vector<unsigned int> ret;
ret = this->toConstantSum(i, T, W, S);
//for (const auto i : ret)
//std::cout << i << ' ';
//std::cout<<std::endl;
return ret;
};
protected:
/*
* Overide restriction of W power of 2.
*/
virtual void paramCheck() final {};
/*
* Must be signed to asserct negative cases.
*/
// Binomial coefficient C(n, k); returns 0 for any out-of-range input.
virtual mpz_class binomial(int n, int k) {
if(n < k || n<0 || k<0)
return 0;
mpz_class ret = 0;
mpz_bin_uiui(ret.get_mpz_t(), n, k);
return ret;
}
// Number of 'blocks'-tuples with entries in [0, max] summing to 'sum'
// (inclusion-exclusion over entries that exceed max).
virtual mpz_class constantSumLen(int blocks, int max, int sum) {
//TODO
//Assert ret >= 0
mpz_class ret = 0;
int aux = std::floor((float)sum/(float)(max+1));
int kmax = std::min(blocks, aux);
for(int k = 0; k <= kmax; k++ ) {
ret += binomial(blocks, k) * binomial(sum - (max+1)*k + blocks -1, blocks - 1) * ((k%2 == 0)?1:-1);
}
return ret;
}
/*
* COULD (but wont) assert that i <= constantSumLen(blocks,max,sum)
* Asserting this here would ruin benchmark results.
*/
// Unrank: find the first block value k by scanning cumulative tuple counts,
// then recurse on the remainder with sum reduced by k.
// NOTE(review): the while loop never terminates if i is out of range.
virtual std::vector<unsigned int> toConstantSum(mpz_class& i,
int blocks, int max, int sum)
{
if (blocks == 1)
return {(unsigned int)sum};
unsigned int k = 0;
mpz_class left = 0;
mpz_class right = constantSumLen(blocks - 1, max, sum);
while ( !(i>= left && i < right) ) {
k++;
left=right;
right += constantSumLen(blocks - 1, max, sum-k);
}
std::vector<unsigned int> ret = {k};
i -= left;
std::vector<unsigned int> ret2 = toConstantSum(i, blocks - 1, max, sum-k);
ret.insert(ret.end(), ret2.begin(), ret2.end());
return ret;
}
// Public key: digest of the concatenated chain ends (each secret advanced W steps).
virtual void genPublicKey() {
this->loadPrivateKey();
ByteArray pub;
for(long unsigned int i = 0; i < this->private_key.size(); i++)
pub += this->digestChain(this->private_key[i], W);
this->public_key = this->digest(pub);
};
// NOTE(review): every entry is digest(private_seed) -- if digest is a pure
// function, all T secrets are identical; confirm this is intended.
virtual void genPrivateKey() {
for(unsigned int i = 0; i < T; i++) {
this->private_key.push_back(this->digest(this->private_seed));
}
};
};
|
// --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018-2019 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include "bb/Manager.h"
#include "bb/Model.h"
namespace bb {
// 定数を連結する
// Appends trainable constant coefficients to the last dimension of the input.
// Forward copies x through unchanged and appends the coefficient values
// (stochastically binarized when FT is the Bit type); Backward passes input
// gradients through and accumulates the mean gradient of each coefficient.
template <typename FT = float, typename BT = float>
class ConcatenateCoefficient : public Model
{
protected:
    bool                    m_host_only   = false;  // force the CPU path when true
    bool                    m_binary_mode = true;   // clamp coefficients to [0,1] each Forward

    indices_t               m_input_shape;
    indices_t               m_output_shape;         // input shape, last dim grown by m_concatenate_size
    indices_t               m_coeff_shape;          // shape of the appended coefficient block

    index_t                 m_concatenate_size;

    std::shared_ptr<Tensor> m_param;                // coefficient values
    std::shared_ptr<Tensor> m_grad;                 // accumulated coefficient gradients

    FrameBuffer             m_y_buf;
    FrameBuffer             m_dx_buf;

    std::mt19937_64         m_mt;                   // RNG for stochastic binarization

public:
    struct create_t {
        index_t         concatenate_size = 0;       // number of coefficients to append
        std::uint64_t   seed             = 1;       // RNG seed
    };

protected:
    // Construct from create_t; seeds the RNG used for stochastic binary output.
    ConcatenateCoefficient(create_t const &create)
    {
        m_concatenate_size = create.concatenate_size;
        m_mt.seed(create.seed);
        m_param = std::shared_ptr<Tensor>(new Tensor);
        m_grad  = std::shared_ptr<Tensor>(new Tensor);
    }

    /**
     * @brief  Command processing ("host_only <bool>" selects the CPU-only path)
     * @param  args  command tokens
     */
    void CommandProc(std::vector<std::string> args)
    {
        if (args.size() == 2 && args[0] == "host_only")
        {
            m_host_only = EvalBool(args[1]);
        }
    }

public:
    ~ConcatenateCoefficient() {}

    static std::shared_ptr<ConcatenateCoefficient> Create(create_t const &create)
    {
        return std::shared_ptr<ConcatenateCoefficient>(new ConcatenateCoefficient(create));
    }

    static std::shared_ptr<ConcatenateCoefficient> Create(index_t concatenate_size, std::uint64_t seed = 1)
    {
        create_t create;
        create.concatenate_size = concatenate_size;
        create.seed             = seed;
        return std::shared_ptr<ConcatenateCoefficient>(new ConcatenateCoefficient(create));
    }

    std::string GetModelName(void) const { return "ConcatenateCoefficient"; }

    /**
     * @brief  Set the input shape.
     * @detail The output shape is the input shape with the last dimension
     *         enlarged by concatenate_size; parameters are (re)initialized.
     * @param  shape  new input shape
     * @return resulting output shape
     */
    indices_t SetInputShape(indices_t shape)
    {
        // nothing to do if the shape is unchanged
        if ( shape == this->GetInputShape() ) {
            return this->GetOutputShape();
        }

        BB_ASSERT(shape.size() > 0);

        m_input_shape = shape;

        // grow the last dimension by the number of appended coefficients
        m_output_shape = m_input_shape;
        m_output_shape[m_output_shape.size() - 1] += m_concatenate_size;

        m_coeff_shape = m_input_shape;
        m_coeff_shape[m_coeff_shape.size() - 1] = m_concatenate_size;

        // initialize parameters to 0.5 and gradients to 0
        m_param->Resize(DataType<BT>::type, m_coeff_shape); *m_param = 0.5;
        m_grad->Resize(DataType<BT>::type, m_coeff_shape);  *m_grad  = 0;

        return m_output_shape;
    }

    /** @return the input shape */
    indices_t GetInputShape(void) const
    {
        return m_input_shape;
    }

    /** @return the output shape */
    indices_t GetOutputShape(void) const
    {
        return m_output_shape;
    }

    Variables GetParameters(void)
    {
        Variables parameters;
        parameters.PushBack(m_param);
        return parameters;
    }

    Variables GetGradients(void)
    {
        Variables gradients;
        gradients.PushBack(m_grad);
        return gradients;
    }

    // Set a single coefficient value.
    void SetParameter(index_t index, BT coeff)
    {
        auto param_ptr = m_param->Lock<BT>();
        param_ptr[index] = coeff;
    }

    // Get a single coefficient value.
    // (was declared 'void' while returning a value, which is ill-formed on
    // instantiation; it now returns BT)
    BT GetParameter(index_t index) const
    {
        auto param_ptr = m_param->LockConst<BT>();
        return param_ptr[index];
    }

    /**
     * @brief  Forward pass
     * @param  x_buf  input data
     * @param  train  true during training (unused here)
     * @return buffer holding x followed by the coefficient values
     */
    inline FrameBuffer Forward(FrameBuffer x_buf, bool train = true)
    {
        BB_ASSERT(x_buf.GetType() == DataType<FT>::type);

        // keep coefficients inside [0, 1] in binary mode
        if ( m_binary_mode ) {
            m_param->Clamp_inplace(0.0, 1.0);
        }

        // size the output buffer
        m_y_buf.Resize(x_buf.GetType(), x_buf.GetFrameSize(), m_output_shape);

        // generic (CPU) implementation
        // (a disabled CUDA stub referencing a nonexistent m_x_buf member
        //  was removed here)
        {
            index_t input_node_size  = x_buf.GetNodeSize();
            index_t output_node_size = m_y_buf.GetNodeSize();
            index_t frame_size       = m_y_buf.GetFrameSize();

            auto x_ptr     = x_buf.LockConst<FT>();
            auto y_ptr     = m_y_buf.Lock<FT>();
            auto param_ptr = m_param->Lock<BT>();

            // pass the input straight through
            #pragma omp parallel for
            for (index_t node = 0; node < input_node_size; ++node) {
                for (index_t frame = 0; frame < frame_size; ++frame) {
                    auto x = x_ptr.Get(frame, node);
                    y_ptr.Set(frame, node, x);
                }
            }

            // append the coefficients after the input nodes
            index_t coeff_size = output_node_size - input_node_size;
            if ( DataType<FT>::type == BB_TYPE_BIT ) {
                // stochastic binarization; serial because m_mt is shared state
                std::uniform_real_distribution<BT> dist((BT)0, (BT)1);
                for (index_t i = 0; i < coeff_size; ++i) {
                    auto coeff = param_ptr[i];
                    for (index_t frame = 0; frame < frame_size; ++frame) {
                        y_ptr.Set(frame, input_node_size + i, dist(m_mt) < coeff);
                    }
                }
            }
            else {
                #pragma omp parallel for
                for (index_t i = 0; i < coeff_size; ++i) {
                    auto coeff = param_ptr[i];
                    for (index_t frame = 0; frame < frame_size; ++frame) {
                        y_ptr.Set(frame, input_node_size + i, (FT)coeff);
                    }
                }
            }

            return m_y_buf;
        }
    }

    /**
     * @brief  Backward pass
     * @param  dy_buf  gradient w.r.t. the output
     * @return gradient w.r.t. the input
     */
    inline FrameBuffer Backward(FrameBuffer dy_buf)
    {
        if (dy_buf.Empty()) {
            return dy_buf;
        }

        // NOTE(review): gradients are processed as BT even though Forward
        // emits FT -- confirm the framework converts between the two.
        BB_ASSERT(dy_buf.GetType() == DataType<BT>::type);

        // size the result buffer
        m_dx_buf.Resize(DataType<BT>::type, dy_buf.GetFrameSize(), m_input_shape);

        // generic (CPU) implementation
        // (a disabled CUDA stub referencing nonexistent members was removed)
        {
            index_t input_node_size  = m_dx_buf.GetNodeSize();
            index_t output_node_size = dy_buf.GetNodeSize();
            index_t frame_size       = dy_buf.GetFrameSize();

            auto dy_ptr   = dy_buf.LockConst<BT>();
            auto dx_ptr   = m_dx_buf.Lock<BT>();
            auto grad_ptr = m_grad->Lock<BT>();

            // pass the input gradients straight through
            #pragma omp parallel for
            for (index_t node = 0; node < input_node_size; ++node) {
                for (index_t frame = 0; frame < frame_size; ++frame) {
                    auto dy = dy_ptr.Get(frame, node);
                    dx_ptr.Set(frame, node, dy);
                }
            }

            // accumulate the mean gradient of each appended coefficient
            index_t coeff_size = output_node_size - input_node_size;
            #pragma omp parallel for
            for (index_t i = 0; i < coeff_size; ++i) {
                BT dy = 0;
                for (index_t frame = 0; frame < frame_size; ++frame) {
                    dy += dy_ptr.Get(frame, input_node_size + i);
                }
                grad_ptr[i] += dy / (BT)frame_size;
            }

            return m_dx_buf;
        }
    }
};
}
// end of file
/**
* @file matrixstrassen.h matrix strassen operations.
* @author TPOC: palisade@njit.edu
*
* @copyright Copyright (c) 2017, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_MATH_MATRIXSTRASSEN_H
#define LBCRYPTO_MATH_MATRIXSTRASSEN_H
#include "matrix.h"
namespace lbcrypto {
template<class Element>
class MatrixStrassen : public Serializable {
public:
    typedef vector<vector<unique_ptr<Element>>>             data_t;
    typedef vector<unique_ptr<Element>>                     lineardata_t;
    typedef typename vector<unique_ptr<Element>>::iterator  it_lineardata_t;
    typedef std::function<unique_ptr<Element>(void)>        alloc_func;

    /**
     * Constructor that initializes matrix values using a zero allocator
     *
     * @param &allocZero lambda function for zero initialization.
     * @param &rows number of rows.
     * @param &cols number of columns.
     */
    MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) {
        data.resize(rows);
        for (auto row = data.begin(); row != data.end(); ++row) {
            for (size_t col = 0; col < cols; ++col) {
                row->push_back(allocZero());
            }
        }
    }

    /**
     * Constructor that initializes matrix values using a distribution generation allocator
     *
     * @param &allocZero lambda function for zero initialization (used for initializing derived matrix objects)
     * @param &rows number of rows.
     * @param &cols number of columns.
     * @param &allocGen lambda function for initialization using a distribution generator.
     */
    MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);

    /**
     * Constructor of an empty matrix; SetSize must be called on this matrix to use it.
     * Basically this exists to support deserializing.
     *
     * @param &allocZero lambda function for zero initialization.
     */
    MatrixStrassen(alloc_func allocZero) : data(), rows(0), cols(0), allocZero(allocZero) {}

    /** Size an empty (deserialization) matrix; throws if already sized. */
    void SetSize(size_t rows, size_t cols) {
        if( this->rows != 0 || this->cols != 0 )
            throw std::logic_error("You cannot SetSize on a non-empty matrix");

        this->rows = rows;
        this->cols = cols;

        data.resize(rows);
        for (auto row = data.begin(); row != data.end(); ++row) {
            for (size_t col = 0; col < cols; ++col) {
                row->push_back(allocZero());
            }
        }
    }

    /**
     * Copy constructor
     *
     * @param &other the matrix object to be copied
     */
    MatrixStrassen(const MatrixStrassen<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
        deepCopyData(other.data);
    }

    /** Assignment operator. */
    inline MatrixStrassen<Element>& operator=(const MatrixStrassen<Element>& other);

    /** In-place change of the current matrix to a matrix of all ones. */
    inline MatrixStrassen<Element>& Ones();

    /** Fill the matrix with the same element value. */
    inline MatrixStrassen<Element>& Fill(const Element &val);

    /** In-place change of the current matrix to the identity matrix. */
    inline MatrixStrassen<Element>& Identity();

    /** Sets the first row to be powers of two. */
    inline MatrixStrassen<Element> GadgetVector() const;

    /** Computes the infinity norm. */
    inline double Norm() const;

    /** Operator for matrix multiplication. */
    inline MatrixStrassen<Element> operator*(MatrixStrassen<Element> const& other) const {
        return Mult(other);
    }

    /**
     * Multiplication of the matrix by a scalar.
     *
     * @param &other the multiplier element
     * @return the result of multiplication
     */
    inline MatrixStrassen<Element> ScalarMult(Element const& other) const {
        MatrixStrassen<Element> result(*this);
        // OpenMP requires a signed induction variable; cast the size_t
        // dimensions once so the loop conditions are not signed/unsigned
        // comparisons (was: int32_t col < result.cols).
        int32_t const nrows = (int32_t)result.rows;
        int32_t const ncols = (int32_t)result.cols;
#pragma omp parallel for
        for (int32_t col = 0; col < ncols; ++col) {
            for (int32_t row = 0; row < nrows; ++row) {
                *result.data[row][col] = *result.data[row][col] * other;
            }
        }
        return result;
    }

    /** Operator for scalar multiplication. */
    inline MatrixStrassen<Element> operator*(Element const& other) const {
        return ScalarMult(other);
    }

    /**
     * Equality check.
     *
     * @param &other the matrix object to compare to
     * @return the boolean result
     */
    inline bool Equal(MatrixStrassen<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            return false;
        }

        for (size_t i = 0; i < rows; ++i) {
            for (size_t j = 0; j < cols; ++j) {
                if (*data[i][j] != *other.data[i][j]) {
                    return false;
                }
            }
        }
        return true;
    }

    /** Operator for equality check. */
    inline bool operator==(MatrixStrassen<Element> const& other) const {
        return Equal(other);
    }

    /** Operator for non-equality check. */
    inline bool operator!=(MatrixStrassen<Element> const& other) const {
        return !Equal(other);
    }

    /** Access the data as a vector of vectors. */
    const data_t& GetData() const {
        return data;
    }

    /** Number of rows in the matrix. */
    size_t GetRows() const {
        return rows;
    }

    /** Number of columns in the matrix. */
    size_t GetCols() const {
        return cols;
    }

    /** The zero allocator for the matrix elements. */
    alloc_func GetAllocator() const {
        return allocZero;
    }

    /**
     * Sets the evaluation or coefficient representation for all ring elements
     * that support the SetFormat method.
     *
     * @param &format the enum value corresponding to coefficient or evaluation representation
     */
    void SetFormat(Format format);

    /**
     * Matrix addition.
     *
     * @param &other the matrix to be added
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element> Add(MatrixStrassen<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            throw invalid_argument("Addition operands have incompatible dimensions");
        }
        MatrixStrassen<Element> result(*this);
        // signed copies of the dimensions for the OpenMP loop (see ScalarMult)
        int32_t const nrows = (int32_t)rows;
        int32_t const ncols = (int32_t)cols;
#pragma omp parallel for
        for (int32_t j = 0; j < ncols; ++j) {
            for (int32_t i = 0; i < nrows; ++i) {
                *result.data[i][j] += *other.data[i][j];
            }
        }
        return result;
    }

    /** Operator for matrix addition. */
    inline MatrixStrassen<Element> operator+(MatrixStrassen<Element> const& other) const {
        return this->Add(other);
    }

    /** Operator for in-place addition. */
    inline MatrixStrassen<Element>& operator+=(MatrixStrassen<Element> const& other);

    /**
     * Matrix subtraction.
     *
     * @param &other the matrix to be subtracted
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element> Sub(MatrixStrassen<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            throw invalid_argument("Subtraction operands have incompatible dimensions");
        }
        MatrixStrassen<Element> result(allocZero, rows, other.cols);
        // signed copies of the dimensions for the OpenMP loop (see ScalarMult)
        int32_t const nrows = (int32_t)rows;
        int32_t const ncols = (int32_t)cols;
#pragma omp parallel for
        for (int32_t j = 0; j < ncols; ++j) {
            for (int32_t i = 0; i < nrows; ++i) {
                *result.data[i][j] = *data[i][j] - *other.data[i][j];
            }
        }
        return result;
    }

    /** Operator for matrix subtraction. */
    inline MatrixStrassen<Element> operator-(MatrixStrassen<Element> const& other) const {
        return this->Sub(other);
    }

    /** Operator for in-place matrix subtraction. */
    inline MatrixStrassen<Element>& operator-=(MatrixStrassen<Element> const& other);

    /** Matrix transposition. */
    inline MatrixStrassen<Element> Transpose() const;

    // YSP The signature of this method needs to be changed in the future
    /**
     * Matrix determinant - found using the Laplace formula with complexity
     * O(d!), where d is the dimension.
     *
     * @param *result where the result is stored
     */
    inline void Determinant(Element *result) const;

    /**
     * Cofactor matrix - the matrix of determinants of the minors A_{ij}
     * multiplied by -1^{i+j}.
     */
    inline MatrixStrassen<Element> CofactorMatrixStrassen() const;

    /** Add rows to the bottom of the matrix. */
    inline MatrixStrassen<Element>& VStack(MatrixStrassen<Element> const& other);

    /** Add columns to the right of the matrix. */
    inline MatrixStrassen<Element>& HStack(MatrixStrassen<Element> const& other);

    /** Matrix indexing operator - writeable instance of the element. */
    inline Element& operator()(size_t row, size_t col) {
        return *data[row][col];
    }

    /** Matrix indexing operator - read-only instance of the element. */
    inline Element const& operator()(size_t row, size_t col) const {
        return *data[row][col];
    }

    /**
     * Matrix row extractor.
     *
     * @param &row row index
     * @return a 1 x cols matrix holding a copy of the row
     */
    inline MatrixStrassen<Element> ExtractRow(size_t row) const {
        MatrixStrassen<Element> result(this->allocZero,1,this->cols);
        int i = 0;
        for (auto elem = this->GetData()[row].begin(); elem != this->GetData()[row].end(); ++elem) {
            result(0,i) = **elem;
            i++;
        }
        return result;
    }

    /** Print values of the matrix to the cout stream. */
    void PrintValues() const;

    /** Call switch format for each (ring) element. */
    inline void SwitchFormat();

    /**
     * Matrix multiplication (Strassen / CAPS).
     *
     * @param &other the multiplier matrix
     * @param nrec levels of Strassen recursion
     * @param pad padding amount (-1 selects automatically)
     * @return the result of multiplication
     */
    MatrixStrassen<Element> Mult(const MatrixStrassen<Element>& other, int nrec=0, int pad = -1) const;

    /*
     * Multiply the matrix by a vector whose elements are all 1's. This causes
     * the elements of each row of the matrix to be added and placed into the
     * corresponding position in the output vector.
     */
    MatrixStrassen<Element> MultByUnityVector() const;

    /*
     * Multiply the matrix by a vector of random 1's and 0's, which is the same
     * as adding select elements in each row together.
     * Return a vector that is a rows x 1 matrix.
     */
    MatrixStrassen<Element> MultByRandomVector(std::vector<int> ranvec) const;

    /**
     * Serialize the object into a Serialized.
     * @param serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
     * @return true if successfully serialized
     */
    bool Serialize(Serialized* serObj) const;

    /**
     * Populate the object from the deserialization of the Serialized.
     * @param serObj contains the serialized object
     * @return true on success
     */
    bool Deserialize(const Serialized& serObj);

private:
    /* Descriptor of a (possibly distributed) matrix used by the CAPS
     * Strassen routines below. */
    struct MatDescriptor {
        int lda;
        int nrec;
        int nproc;
        int nprocr;
        int nprocc;
        int nproc_summa;
        int bs;
    };

    const int DESC_SIZE = 7; // number of ints that make up a MatDescriptor
    const int rank=0, base=0;

    mutable data_t data;
    size_t rows;
    mutable int rowpad = 0;
    size_t cols;
    mutable int colpad = 0;
    alloc_func allocZero;
    mutable char *pattern = NULL;

    // operation counters (profiling)
    mutable int numAdd = 0;
    mutable int numMult = 0;
    mutable int numSub = 0;

    mutable MatDescriptor desc;
    mutable unique_ptr<Element> zeroUniquePtr = allocZero();
    mutable int NUM_THREADS = 1;

    void multiplyInternalCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t work ) const;
    void strassenDFSCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t workPassThrough ) const;
    void block_multiplyCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor d, it_lineardata_t workPassThrough ) const;
    void LinearizeDataCAPS(lineardata_t *lineardataPtr) const;
    void UnlinearizeDataCAPS(lineardata_t *lineardataPtr) const;
    int getRank() const;
    void verifyDescriptor( MatDescriptor desc );
    long long numEntriesPerProc( MatDescriptor desc ) const;

    // deep copy of data - used for copy constructor
    void deepCopyData(data_t const& src);
    void getData(const data_t &Adata, const data_t &Bdata, const data_t &Cdata, int row, int inner, int col) const;
    void accessUniquePtrCAPS(it_lineardata_t ptr, Element val) const;
    void smartSubtractionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
    void smartAdditionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
    void addMatricesCAPS( int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B ) const;
    void addSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2,
        it_lineardata_t S21, it_lineardata_t S22 ) const;
    void subMatricesCAPS( int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B ) const;
    void tripleAddMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2,
        it_lineardata_t S21, it_lineardata_t S22, it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const;
    void tripleSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2,
        it_lineardata_t S21, it_lineardata_t S22, it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const ;
    void distributeFrom1ProcCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I ) const;
    void collectTo1ProcCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I ) const;
    void sendBlockCAPS( int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldi ) const;
    void receiveBlockCAPS( int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldo ) const;
    void distributeFrom1ProcRecCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldi ) const;
    void collectTo1ProcRecCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldo ) const;
};
/**
 * Left scalar multiplication: e * M.
 *
 * Forwards to MatrixStrassen::ScalarMult so that scalar-times-matrix
 * behaves exactly like matrix-times-scalar.
 *
 * @param &e the scalar element to multiply by
 * @param &M the matrix to be scaled
 * @return a new matrix whose entries are e times those of M
 */
template<class Element>
inline MatrixStrassen<Element> operator*(Element const& e, MatrixStrassen<Element> const& M)
{
    return M.ScalarMult(e);
}
/**
 * Generates a matrix of rotations. See pages 7-8 of https://eprint.iacr.org/2013/297
 *
 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
inline MatrixStrassen<BigInteger> Rotate(MatrixStrassen<Poly> const& inMat);

/**
 * Each element becomes a square matrix with columns of that element's
 * rotations in coefficient form. See pages 7-8 of https://eprint.iacr.org/2013/297
 *
 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
inline MatrixStrassen<BigVector> RotateVecResult(MatrixStrassen<Poly> const& inMat);

/**
 * Stream output operator
 *
 * @param &os stream
 * @param &m matrix to be outputted
 * @return the chained stream
 */
template<class Element>
inline std::ostream& operator<<(std::ostream& os, const MatrixStrassen<Element>& m);

/**
 * Gives the Cholesky decomposition of the input matrix.
 * The assumption is that covariance matrix does not have large coefficients because it is formed by
 * discrete gaussians e and s; this implies int32_t can be used
 * This algorithm can be further improved - see the Darmstadt paper section 4.4
 * http://eprint.iacr.org/2013/297.pdf
 *
 * @param &input the matrix for which the Cholesky decomposition is to be computed
 * @return the resulting matrix of floating-point numbers
 */
inline MatrixStrassen<double> Cholesky(const MatrixStrassen<int32_t> &input);

/**
 * Convert a matrix of integers from BigInteger to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param &input the input matrix
 * @param &modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigInteger> &input, const BigInteger& modulus);

/**
 * Convert a matrix of BigVector to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param &input the input matrix
 * @param &modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigVector> &input, const BigInteger& modulus);

/**
 * Split a vector of int32_t into a vector of ring elements with ring dimension n
 *
 * @param &other the input matrix
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
inline MatrixStrassen<Poly> SplitInt32IntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);

/**
 * Another method for splitting a vector of int32_t into a vector of ring elements with ring dimension n
 *
 * @param &other the input matrix
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
inline MatrixStrassen<Poly> SplitInt32AltIntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);
}
#endif // LBCRYPTO_MATH_MATRIXSTRASSEN_H
|
initialize-brisbane.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB BT code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header-brisbane.h"
//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values
//---------------------------------------------------------------------
void initialize()
{
  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

  //---------------------------------------------------------------------
  // Later (in compute_rhs) we compute 1/u for every element. A few of
  // the corner elements are not used, but it convenient (and faster)
  // to compute the whole thing with a simple loop. Make sure those
  // values are nonzero by initializing the whole thing here.
  //---------------------------------------------------------------------
  for (k = 0; k <= grid_points[2]-1; k++) {
    for (j = 0; j <= grid_points[1]-1; j++) {
      for (i = 0; i <= grid_points[0]-1; i++) {
        for (m = 0; m < 5; m++) {
          u[k][j][i][m] = 1.0;
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // first store the "interpolated" values everywhere on the grid
  //---------------------------------------------------------------------
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)(k) * dnzm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)(j) * dnym1;
      for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)(i) * dnxm1;

        // evaluate the exact solution on the three pairs of opposite
        // faces (xi = 0/1, eta = 0/1, zeta = 0/1) at this grid point
        for (ix = 0; ix < 2; ix++) {
          exact_solution((double)ix, eta, zeta, &Pface[ix][0][0]);
        }

        for (iy = 0; iy < 2; iy++) {
          exact_solution(xi, (double)iy , zeta, &Pface[iy][1][0]);
        }

        for (iz = 0; iz < 2; iz++) {
          exact_solution(xi, eta, (double)iz, &Pface[iz][2][0]);
        }

        // tri-linear transfinite interpolation of the six face values
        for (m = 0; m < 5; m++) {
          Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m];
          Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m];

          u[k][j][i][m] = Pxi + Peta + Pzeta -
                          Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
                          Pxi*Peta*Pzeta;
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // now store the exact values on the boundaries
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // west face (xi = 0)
  //---------------------------------------------------------------------
  i = 0;
  xi = 0.0;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)(k) * dnzm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)(j) * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[k][j][i][m] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // east face (xi = 1)
  //---------------------------------------------------------------------
  i = grid_points[0]-1;
  xi = 1.0;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)(k) * dnzm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)(j) * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[k][j][i][m] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // south face (eta = 0)
  //---------------------------------------------------------------------
  j = 0;
  eta = 0.0;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)(k) * dnzm1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)(i) * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[k][j][i][m] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // north face (eta = 1)
  //---------------------------------------------------------------------
  j = grid_points[1]-1;
  eta = 1.0;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)(k) * dnzm1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)(i) * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[k][j][i][m] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // bottom face (zeta = 0)
  //---------------------------------------------------------------------
  k = 0;
  zeta = 0.0;
  for (j = 0; j <= grid_points[1]-1; j++) {
    eta = (double)(j) * dnym1;
    for (i =0; i <= grid_points[0]-1; i++) {
      xi = (double)(i) *dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[k][j][i][m] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // top face (zeta = 1)
  //---------------------------------------------------------------------
  k = grid_points[2]-1;
  zeta = 1.0;
  for (j = 0; j <= grid_points[1]-1; j++) {
    eta = (double)(j) * dnym1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)(i) * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[k][j][i][m] = temp[m];
      }
    }
  }

  // push the freshly initialized host array u to the device copy
  #pragma omp target update to(u)
  brisbane_task task0;
  brisbane_task_create(&task0);
  brisbane_task_h2d_full(task0, mem_u, u);
  brisbane_task_submit(task0, brisbane_cpu, NULL, true);
}
//---------------------------------------------------------------------
// Reset the block-tridiagonal left-hand side at both ends of a line:
// clear the three 5x5 coefficient blocks at index 0 and at index
// 'size', then place identity matrices on the middle (main-diagonal)
// blocks so the boundary rows act as no-ops during the solve.
//---------------------------------------------------------------------
void lhsinit(double lhs[][3][5][5], int size)
{
  int row, col, blk;
  const int last = size;

  //---------------------------------------------------------------------
  // zero every entry of the first and last block triples
  //---------------------------------------------------------------------
  for (col = 0; col < 5; col++) {
    for (row = 0; row < 5; row++) {
      for (blk = 0; blk < 3; blk++) {
        lhs[0][blk][col][row] = 0.0;
        lhs[last][blk][col][row] = 0.0;
      }
    }
  }

  //---------------------------------------------------------------------
  // unit diagonal on the middle blocks (overkill, but convenient)
  //---------------------------------------------------------------------
  for (row = 0; row < 5; row++) {
    lhs[0][1][row][row] = 1.0;
    lhs[last][1][row][row] = 1.0;
  }
}
|
CommonUtils.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_COMMONUTILS_H_
#define _SPTAG_COMMON_COMMONUTILS_H_
#include "../Common.h"
#include <unordered_map>
#include <exception>
#include <algorithm>
#include <time.h>
#include <omp.h>
#include <string.h>
#define PREFETCH
#ifndef _MSC_VER
#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>
#include <cstring>
#define InterlockedCompareExchange(a,b,c) __sync_val_compare_and_swap(a, c, b)
#define InterlockedExchange8(a,b) __sync_lock_test_and_set(a, b)
#define Sleep(a) usleep(a * 1000)
#define strtok_s(a, b, c) strtok_r(a, b, c)
#endif
namespace SPTAG
{
namespace COMMON
{
// Static utility helpers shared across SPTAG: pseudo-random numbers,
// lock-free float accumulation, vector normalization, and insertion
// into a sorted fixed-size nearest-neighbor list.
class Utils {
public:
    // Returns a pseudo-random SizeType value in [low, high).
    // NOTE(review): based on std::rand(), so distribution quality and
    // thread-safety follow the C runtime generator.
    static SizeType rand(SizeType high = MaxSize, SizeType low = 0) // Generates a random int value.
    {
        return low + (SizeType)(float(high - low)*(std::rand() / (RAND_MAX + 1.0)));
    }

    // Atomically adds 'operand' to *ptr and returns the updated value.
    // Implemented as a compare-and-swap loop: the float bits are viewed
    // as a long for the CAS (InterlockedCompareExchange on Windows,
    // __sync_val_compare_and_swap via the macro at the top of this
    // header elsewhere).
    static inline float atomic_float_add(volatile float* ptr, const float operand)
    {
        // anonymous unions expose the same bits both as a long (for the
        // CAS) and as a float (for the arithmetic)
        union {
            volatile long iOld;
            float fOld;
        };
        union {
            long iNew;
            float fNew;
        };

        while (true) {
            // snapshot the current bits, compute the prospective new value
            iOld = *(volatile long *)ptr;
            fNew = fOld + operand;
            if (InterlockedCompareExchange((long *)ptr, iNew, iOld) == iOld) {
                // CAS succeeded: no concurrent writer intervened
                return fNew;
            }
            // another thread won the race; retry with fresh bits
        }
    }

    // Returns the normalization base for element type T: the maximum
    // representable value for integer types, or 1 for float.
    template<typename T>
    static inline int GetBase() {
        if (GetEnumValueType<T>() != VectorValueType::Float) {
            return (int)(std::numeric_limits<T>::max)();
        }
        return 1;
    }

    // Scales 'arr' (length 'col') so its L2 norm equals 'base'.
    // Near-zero vectors (norm < 1e-6) are replaced by a uniform vector
    // of norm 'base' to avoid dividing by ~0.
    template <typename T>
    static void Normalize(T* arr, DimensionType col, int base) {
        double vecLen = 0;
        for (DimensionType j = 0; j < col; j++) {
            double val = arr[j];
            vecLen += val * val;
        }
        vecLen = std::sqrt(vecLen);
        if (vecLen < 1e-6) {
            T val = (T)(1.0 / std::sqrt((double)col) * base);
            for (DimensionType j = 0; j < col; j++) arr[j] = val;
        }
        else {
            for (DimensionType j = 0; j < col; j++) arr[j] = (T)(arr[j] / vecLen * base);
        }
    }

    // Normalizes each of the 'row' vectors stored contiguously in 'data'
    // in parallel with the requested number of OpenMP threads.
    template <typename T>
    static void BatchNormalize(T* data, SizeType row, DimensionType col, int base, int threads) {
#pragma omp parallel for num_threads(threads)
        for (SizeType i = 0; i < row; i++)
        {
            SPTAG::COMMON::Utils::Normalize(data + i * (size_t)col, col, base);
        }
    }

    // Inserts candidate 'idx' with distance 'dist' into the parallel
    // arrays 'neighbors'/'dists' (length 'size', kept sorted ascending
    // by distance, ties broken by smaller id). No-op if the candidate
    // does not beat the current worst entry or is already present.
    static inline void AddNeighbor(SizeType idx, float dist, SizeType *neighbors, float *dists, DimensionType size)
    {
        size--;   // index of the worst (last) slot
        if (dist < dists[size] || (dist == dists[size] && idx < neighbors[size]))
        {
            DimensionType nb;
            // scan for a duplicate of 'idx'
            for (nb = 0; nb <= size && neighbors[nb] != idx; nb++);

            if (nb > size)   // not present: shift worse entries down, insert
            {
                nb = size;
                while (nb > 0 && (dist < dists[nb - 1] || (dist == dists[nb - 1] && idx < neighbors[nb - 1])))
                {
                    dists[nb] = dists[nb - 1];
                    neighbors[nb] = neighbors[nb - 1];
                    nb--;
                }
                dists[nb] = dist;
                neighbors[nb] = idx;
            }
        }
    }
};
}
}
#endif // _SPTAG_COMMON_COMMONUTILS_H_
|
GrB_Type_wait.c | //------------------------------------------------------------------------------
// GrB_Type_wait: wait for a user-defined GrB_Type to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GrB_Type has no pending operations
// to wait for. All this method does is verify that the type is properly
// initialized, and then it does an OpenMP flush.
#include "GB.h"
GrB_Info GrB_Type_wait          // no work, just check if the GrB_Type is valid
(
    GrB_Type *type
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // flush first so prior writes to the type object by other threads are
    // visible before it is validated
    #pragma omp flush

    GB_WHERE1 ("GrB_Type_wait (&type)") ;
    GB_RETURN_IF_NULL (type) ;
    GB_RETURN_IF_NULL_OR_FAULTY (*type) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // flush again so this thread's view of the type is published on return
    #pragma omp flush

    return (GrB_SUCCESS) ;
}
|
sink-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -Wunknown-pragmas -Werror" } */
extern void bark (void);
int i,j,k;
int array[555];
/* Negative/positive test cases for the OpenMP doacross clauses
   depend(source) and depend(sink); the inline dg-error directives mark
   the diagnostics the compiler is expected to emit.  */
int
main()
{
#pragma omp parallel for ordered(2)
  for (i=0; i < 100; ++i)
    for (j=0; j < 100; ++j)
      {
	/* OUT variant does not apply to ORDERED construct. */
#pragma omp ordered depend(out:i) /* { dg-error "invalid depend kind" } */
	/* depend(sink...) is allowed without an offset. */
#pragma omp ordered depend(sink:i,j-1)
#pragma omp ordered depend(sink:i-1,j+2)
	bark ();
      }

  /* depend(sink...) does not apply to `omp task'. */
#pragma omp task depend(sink:i+3) /* { dg-error "only allowed in 'omp ordered'" } */
  bark();

#pragma omp ordered depend(source) /* { dg-error "'depend' clause must be closely nested" } */

#pragma omp parallel for ordered(2)
  for (i=0; i < 100; ++i)
    for (j=0; j < 100; ++j)
      {
	/* Multiple depend(source) allowed. */
#pragma omp ordered depend(source)
#pragma omp ordered depend(source)
      }

  /* A sink vector must name exactly the ordered(2) iteration variables. */
#pragma omp parallel for ordered(2)
  for (i=0; i < 100; ++i)
    for (j=0; j < 100; ++j)
      {
#pragma omp ordered depend(sink:i-2,j-2,k+2) /* { dg-error "does not match number of iteration var" } */
	bark();
      }

#pragma omp parallel for ordered(2)
  for (i=0; i < 100; ++i)
    for (j=0; j < 100; ++j)
      {
#pragma omp ordered depend(sink:i-2) /* { dg-error "does not match number of iteration variables" } */
	bark();
      }

  /* Only loop iteration variables may appear in a sink vector. */
#pragma omp parallel for ordered(2)
  for (i=0; i < 100; ++i)
    for (j=0; j < 100; ++j)
      {
#pragma omp ordered depend(sink:k,i) /* { dg-error "is not an iteration" } */
	bark();
      }
}
void bar (int, int, int);
/* Valid doacross usage with collapse(2) and a depth-3 ordered nest:
   each sink vector names all three iteration variables.  */
void
foo (int n, int m, int o)
{
  int i, j, k;
#pragma omp for collapse(2) ordered(3)
  for (i = 0; i < m; i++)
    {
      for (j = 0; j < n; j++)
	for (k = 0; k < o; k++)
	  {
#pragma omp ordered depend(sink: i-1,j,k) depend(sink: i,j-1,k-1) depend(sink: i-1,j-1,k+1)
	    bar (i, j, k);
#pragma omp ordered depend(source)
	  }
    }
}
/* Valid doacross usage: sink offsets on both ordered(2) variables,
   paired with a depend(source) in the same iteration.  */
int
baz ()
{
  int i, j;
#pragma omp parallel for ordered(2)
  for (i=0; i < 100; ++i)
    for (j=0; j < 100; ++j)
      {
#pragma omp ordered depend(sink:i-1,j-3)
	bar (i, j, 0);
#pragma omp ordered depend(source)
      }
  return 0;
}
|
fluid_solver.h | /*
* File: edgebased_levelset.h
* Author: rrossi
*
* Created on July 31, 2009, 10:51 AM
*/
/*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: antonia $
// Date: $Date: 2009-01-14 16:24:38 $
// Revision: $Revision: 1.11 $
//
//
#if !defined(KRATOS_EDGEBASED_FLUID_SOLVER_H_INCLUDED)
#define KRATOS_EDGEBASED_FLUID_SOLVER_H_INCLUDED
//#define SPLIT_OSS
#define SYMM_PRESS
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// #include <omp.h>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "includes/cfd_variables.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "incompressible_fluid_application.h"
namespace Kratos
{
template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver>
class FluidSolver
{
public:
//name for the self defined structure
typedef EdgesStructureType<TDim> CSR_Tuple;
typedef vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef vector<unsigned int> IndicesVectorType;
//defining matrix type for test calculations
typedef vector< array_1d<double, TDim> > CalcVectorType;
//defining type for local storage of nodal values
typedef vector<double> ValuesVectorType;
//defining types for matrix operations
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
typedef std::size_t SizeType;
//constructor and destructor

// Builds the edge-based fluid solver. References to the matrix container
// and model part are stored (not copied), so both must outlive this
// object. Stabilization factors and flags go through the initializer
// list; physical properties (viscosity, density, body force) and the
// remaining state are assigned in the body.
FluidSolver(MatrixContainer& mr_matrix_container,
            ModelPart& mr_model_part,
            const double viscosity,
            const double density,
            const Vector body_force,
            bool use_mass_correction,
            double edge_detection_angle,
            double stabdt_pressure_factor,
            double stabdt_convection_factor,
            double tau2_factor,
            bool assume_constant_dp
           )
    : mr_matrix_container(mr_matrix_container),
      mr_model_part(mr_model_part),
      mstabdt_pressure_factor(stabdt_pressure_factor),
      mstabdt_convection_factor(stabdt_convection_factor),
      medge_detection_angle(edge_detection_angle),
      mtau2_factor(tau2_factor),
      massume_constant_dp(assume_constant_dp)
{
    mViscosity = viscosity;

    noalias(mBodyForce) = body_force;
    mRho = density;

    // sentinel values until ComputeTimeStep() provides real ones
    mdelta_t_avg = 1000.0;
    max_dt = 1.0;

    muse_mass_correction = use_mass_correction;
    mWallLawIsActive = false;

    //	    for (unsigned int i = 0; i < TDim; i++) mBodyForce[i] = 0;
    //	    mBodyForce[1] = -9.81;
    //
    //	    mRho = 1000.0;
};

~FluidSolver()
{
};
//***********************************
//function to initialize fluid solver

// Allocates all nodal/edge work arrays, loads the current velocity and
// pressure fields from the Kratos database, classifies boundary nodes
// (fixed-velocity and pressure-outlet), builds the sparsity pattern of
// the pressure matrix mL from the CSR edge structure, and seeds the
// pressure projection with the body force.
void Initialize(
)
{
    KRATOS_TRY

    //get number of nodes
    unsigned int n_nodes = mr_model_part.Nodes().size();
    unsigned int n_edges = mr_matrix_container.GetNumberEdges();

    //size data vectors
    mWork.resize(n_nodes);
    mvel_n.resize(n_nodes);
    mvel_n1.resize(n_nodes);
    mPn.resize(n_nodes);
    mPn1.resize(n_nodes);
    mHmin.resize(n_nodes);
    mHavg.resize(n_nodes);
    mNodalFlag.resize(n_nodes);
    mTauPressure.resize(n_nodes);
    mTauConvection.resize(n_nodes);
    mTau2.resize(n_nodes);
    mPi.resize(n_nodes);
    mXi.resize(n_nodes);
    mx.resize(n_nodes);
    mEdgeDimensions.resize(n_edges);

    //convection variables
    mBeta.resize(n_nodes);

    mdiv_error.resize(n_nodes);
    mr_matrix_container.SetToZero(mdiv_error);

    //read velocity and pressure data from Kratos
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes());
    mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes());
    mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, mr_model_part.Nodes());
    mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes());

    //set flag for first time step
    mFirstStep = true;

    //loop to categorize boundary nodes
    std::vector< unsigned int> tempFixedVelocities;
    std::vector< array_1d<double,TDim> > tempFixedVelocitiesValues;
    std::vector< unsigned int> tempPressureOutletList;
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
    {
        int index = inode->FastGetSolutionStepValue(AUX_INDEX);
        if (inode->IsFixed(VELOCITY_X)) //note that the variables can be either all fixed or no one fixed
        {
            if (inode->IsFixed(VELOCITY_Y) == false || inode->IsFixed(VELOCITY_Z) == false)
            {
                std::cout << "error found on the fixity of node " << inode->Id() << std::endl;
                KRATOS_THROW_ERROR(std::logic_error, "velocities can be either all fixed or none fixed", "")
            }
            tempFixedVelocities.push_back(index);
            tempFixedVelocitiesValues.push_back(mvel_n1[index]);
        }

        if (inode->IsFixed(PRESSURE))
        {
            tempPressureOutletList.push_back(index);
            //                    mPressureOutlet.push_back(external_pressure[index]);
        }
    }
    mFixedVelocities.resize(tempFixedVelocities.size(),false);
    mFixedVelocitiesValues.resize(tempFixedVelocitiesValues.size(),false);
    mPressureOutletList.resize(tempPressureOutletList.size(),false);

    // copy the gathered boundary data into the (Kratos) member vectors
    #pragma omp parallel for
    for(int i=0; i< static_cast<int>(tempFixedVelocities.size()); i++)
    {
        mFixedVelocities[i] = tempFixedVelocities[i];
        mFixedVelocitiesValues[i] = tempFixedVelocitiesValues[i];
    }

    #pragma omp parallel for
    for(int i=0; i<static_cast<int>(tempPressureOutletList.size()); i++)
    {
        mPressureOutletList[i] = tempPressureOutletList[i];
    }

    //compute slip normals and fill SlipList
    CalculateNormals(mr_model_part.Conditions());
    mr_matrix_container.WriteVectorToDatabase(NORMAL, mSlipNormal, mr_model_part.Nodes());

    if(TDim == 3)
        DetectEdges3D(mr_model_part.Conditions());

    //determine number of edges and entries
    unsigned int n_nonzero_entries = 2 * n_edges + n_nodes;
    //allocate memory for variables
    mL.resize(n_nodes, n_nodes, n_nonzero_entries);

    // build the CSR sparsity pattern of mL: for each row, insert the
    // diagonal entry at its sorted position among the edge entries
    //loop over all nodes
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        //flag for considering diagonal matrix elements
        bool flag = 0;

        //loop over all neighbours
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            //get global index of neighbouring node j
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];

            //define matrix structure row by row (the order does matter!)
            if ((j_neighbour > i_node) && (flag == 0))
            {
                //add diagonal/nodal contribution
                mL.push_back(i_node, i_node, 0.0);
                flag = 1;
            }
            //add non-diagonal/edge contribution
            mL.push_back(i_node, j_neighbour, 0.0);
        }

        //if diagonal element is the last non-zero element of the row
        if (flag == 0)
            mL.push_back(i_node, i_node, 0.0);
    }

    //compute minimum length of the surrounding edges
    CalculateEdgeLengths(mr_model_part.Nodes());

    //set the pressure projection to the body force value
    array_1d<double,3> temp = mRho * mBodyForce;
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
        inode->FastGetSolutionStepValue(PRESS_PROJ) = temp;

    KRATOS_CATCH("")
}
//***************************************
//function to set adequate time step size
double ComputeMinimum_Havg()
{
KRATOS_TRY
double hmin_global = 1e10;
//*******************
//loop over all nodes
double n_nodes = mvel_n1.size();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
const double havg_i = mHavg[i_node];
//const double hmin_i = mHmin[i_node];
if(havg_i < hmin_global) hmin_global=havg_i;
}
return hmin_global;
KRATOS_CATCH("")
}
//***************************************
//function to set adequate time step size
double ComputeTimeStep(const double CFLNumber, const double MaxDt)
{
KRATOS_TRY
//save the maximum time step
max_dt = MaxDt;
//local variable for time step size
double delta_t = 1e10;
mdelta_t_avg = 1e10;
//getting value of current velocity and of viscosity
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
//*******************
//loop over all nodes
double n_nodes = mvel_n1.size();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
const array_1d<double, TDim>& v_i = mvel_n1[i_node];
const double havg_i = mHavg[i_node];
const double hmin_i = mHmin[i_node];
double vel_norm = norm_2(v_i);
//use CFL condition to compute time step size
double delta_t_i = CFLNumber * 1.0 / (2.0 * vel_norm /hmin_i + 4.0 * mViscosity / (hmin_i * hmin_i) );
double delta_t_i_avg = 1.0 / (2.0 * vel_norm /havg_i + 4.0 * mViscosity / (havg_i * havg_i) );
//considering the most restrictive case of neighbor's velocities with similar direction but opposite sense.
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& v_j = mvel_n1[j_neighbour];
double v_diff_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
double temp = v_i[l_comp] - v_j[l_comp];
v_diff_norm += temp*temp;
}
v_diff_norm = sqrt(v_diff_norm);
double delta_t_j = CFLNumber * 1.0 / (2.0 * v_diff_norm /hmin_i + 4.0 * mViscosity / (hmin_i * hmin_i));
if (delta_t_j < delta_t_i)
delta_t_i = delta_t_j;
}
//choose the overall minimum of delta_t_i
if (delta_t_i < delta_t)
delta_t = delta_t_i;
if(delta_t_i_avg < mdelta_t_avg)
mdelta_t_avg = delta_t_i_avg;
}
//*******************
//perform MPI syncronization of the dt (minimum should be kept)
return delta_t;
KRATOS_CATCH("")
}
void UpdateFixedVelocityValues()
{
KRATOS_TRY
//read velocity and pressure data from Kratos
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
int fixed_size = mFixedVelocities.size();
#pragma omp parallel for firstprivate(fixed_size)
for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
{
unsigned int i_node = mFixedVelocities[i_velocity];
array_1d<double, TDim>& u_i_fix = mFixedVelocitiesValues[i_velocity];
const array_1d<double, TDim>& u_i = mvel_n1[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
u_i_fix[comp] = u_i[comp];
}
KRATOS_CATCH("");
}
//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum

// Advances the fractional momentum equation with an explicit 4-stage
// Runge-Kutta scheme. First recomputes the stabilization parameters
// (mTauPressure, mTauConvection, mTau2) and the convective projection
// mPi, then accumulates the RK stages into mWork and writes the final
// fractional velocity into mvel_n1, applying the velocity BCs after
// every stage. The exact order of the Add_Minv_value calls encodes the
// RK weights (1/6, 1/3, 1/3, 1/6) — do not reorder.
void SolveStep1()
{
    KRATOS_TRY

    //PREREQUISITES

    //variables for node based data handling
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();

    //storage of nodal values in local variables
    CalcVectorType rhs;
    rhs.resize(n_nodes);

    //read velocity and pressure data from Kratos
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
    mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
    mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes);
    mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes);

    //read time step size from Kratos
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];

    //compute intrinsic time
    double time_inv_avg = 1.0/mdelta_t_avg;

    // local copies so the OpenMP loop can take them firstprivate
    double stabdt_pressure_factor = mstabdt_pressure_factor;
    double stabdt_convection_factor = mstabdt_convection_factor;
    double tau2_factor = mtau2_factor;

    // NOTE(review): leftover debug output — this prints on every step
    KRATOS_WATCH(stabdt_pressure_factor);

    // recompute the nodal stabilization parameters
    #pragma omp parallel for firstprivate(time_inv_avg,stabdt_pressure_factor,stabdt_convection_factor,tau2_factor)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double& h_avg_i = mHavg[i_node];
        array_1d<double, TDim>& a_i = mvel_n1[i_node];
        const double nu_i = mViscosity;

        double vel_norm = norm_2(a_i);

        double tau = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_pressure_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) );
        double tau_conv = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_convection_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) );

        mTauPressure[i_node] = tau;
        mTauConvection[i_node] = tau_conv;

        mTau2[i_node] = (mViscosity + h_avg_i*vel_norm*0.5)*tau2_factor;
    }

    //calculating the convective projection
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& pi_i = mPi[i_node];

        //setting to zero
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            pi_i[l_comp] = 0.0;

        array_1d<double, TDim> a_i = mvel_n1[i_node];
        const array_1d<double, TDim>& U_i = mvel_n1[i_node];

        // accumulate the edge-wise convective contributions
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];

            array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
            const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];

            CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
            edge_ij.Add_ConvectiveContribution(pi_i, a_i, U_i, a_j, U_j);
        }

        // scale by the lumped inverse mass
        const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            pi_i[l_comp] *= m_inv;
    }

    mr_matrix_container.AssignVectorToVector(mvel_n, mWork); //mWork = mvel_n

    //first step of Runge Kutta
    mr_matrix_container.AssignVectorToVector(mvel_n, mvel_n1); //mvel_n1 = mvel_n
    mr_matrix_container.SetToZero(rhs);
    CalculateRHS(mvel_n1, mPn, mvel_n1, rhs);
    mr_matrix_container.Add_Minv_value(mWork, mWork, delta_t / 6.0, mr_matrix_container.GetInvertedMass(), rhs);
    mr_matrix_container.Add_Minv_value(mvel_n1, mvel_n, 0.5 * delta_t, mr_matrix_container.GetInvertedMass(), rhs);
    ApplyVelocityBC(mvel_n1);

    //second step
    mr_matrix_container.SetToZero(rhs);
    CalculateRHS(mvel_n1, mPn, mvel_n1, rhs);
    mr_matrix_container.Add_Minv_value(mWork, mWork, delta_t / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
    mr_matrix_container.Add_Minv_value(mvel_n1, mvel_n, 0.5 * delta_t, mr_matrix_container.GetInvertedMass(), rhs);
    ApplyVelocityBC(mvel_n1);

    //third step
    mr_matrix_container.SetToZero(rhs);
    CalculateRHS(mvel_n1, mPn, mvel_n1, rhs);
    mr_matrix_container.Add_Minv_value(mWork, mWork, delta_t / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
    mr_matrix_container.Add_Minv_value(mvel_n1, mvel_n, delta_t, mr_matrix_container.GetInvertedMass(), rhs);
    ApplyVelocityBC(mvel_n1);

    //fourth step
    mr_matrix_container.SetToZero(rhs);
    CalculateRHS(mvel_n1, mPn, mvel_n1, rhs);
    mr_matrix_container.Add_Minv_value(mWork, mWork, delta_t / 6.0, mr_matrix_container.GetInvertedMass(), rhs);

    //compute right-hand side
    mr_matrix_container.AssignVectorToVector(mWork, mvel_n1);
    ApplyVelocityBC(mvel_n1);

    KRATOS_CATCH("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
// Assembles the right-hand side of the fractional momentum equation on the
// edge-based CSR structure. Per node: lumped body force, minus convection,
// minus pressure gradient (divided by density), minus viscous term, minus
// OSS-type convection stabilization (LOW/HIGH difference scaled by tau).
//   vel                 - velocity used in convective/viscous terms
//   pressure            - nodal pressure for the gradient term
//   convective_velocity - advecting velocity field (may equal vel)
//   rhs                 - output; fully overwritten for every node
void CalculateRHS(
const CalcVectorType& vel,
const ValuesVectorType& pressure,
const CalcVectorType& convective_velocity,
CalcVectorType& rhs)
{
KRATOS_TRY
int n_nodes = vel.size();
//calculating the RHS
array_1d<double, TDim> stab_low;
array_1d<double, TDim> stab_high;
// constant-viscosity model: the same nu is used on both ends of each edge
const double nu_i = mViscosity;
const double nu_j = mViscosity;
double inverse_rho = 1.0 / mRho;
// stab_low/stab_high are per-thread scratch arrays, hence private
#pragma omp parallel for private(stab_low,stab_high)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = mBodyForce;
array_1d<double, TDim> a_i = convective_velocity[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
// mPi holds the convection projection computed beforehand (high-order stab term)
const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = pressure[i_node];
double edge_tau = mTauConvection[i_node];
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * f_i[comp] ;
// loop over the CSR edges of node i and accumulate all edge contributions
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim> a_j = convective_velocity[j_neighbour];
const array_1d<double, TDim>& U_j = vel[j_neighbour];
const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = pressure[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, U_i, a_j, U_j);
// pressure gradient enters divided by density (kinematic form)
edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
edge_ij.Sub_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
//add stabilization
edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i, a_j, U_j);
edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
// double beta = 1.0;
// double beta = beta_i;
// if(beta_j > beta)
// beta = beta_j;
// beta = 1.0;
// edge_ij.Sub_StabContribution(rhs_i, edge_tau*beta, 1.0, stab_low, stab_high);
// edge_ij.Sub_StabContribution(rhs_i, edge_tau, (1.0-beta), stab_low, stab_high);
edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
//add tau2 term
// boost::numeric::ublas::bounded_matrix<double,TDim,TDim>& LL = edge_ij.LaplacianIJ;
// for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
// {
// double aaa = 0.0;
// for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
// aaa += LL(k_comp,m_comp) * (U_j[m_comp] - U_i[m_comp]);
// rhs_i[k_comp] -= tau2_i*aaa;
// }
}
}
//apply wall resistance
if(mWallLawIsActive == true)
ComputeWallResistance(vel,rhs);
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
// NOTE(review): writing mvel_n1 back to the VELOCITY database inside an RHS
// assembly routine is a side effect unrelated to 'rhs' — confirm intentional.
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
KRATOS_CATCH("")
}
//*************************************************************************
//function to solve fluid equations - fractional step 2: calculate pressure
// Fractional step 2: assembles and solves the pressure equation.
// Builds the edge-based Laplacian mL and right-hand side (pressure terms,
// divergence of the fractional velocity, high-order stabilization), applies
// pressure-outlet BCs by penalization, symmetrically scales the system by
// 1/sqrt(|diag|), solves for the pressure increment dp, updates mPn1, and
// finally recomputes the pressure-gradient projection mXi for the next step.
//   pLinearSolver - linear solver used for the scaled system mL * dp = rhs
void SolveStep2(typename TLinearSolver::Pointer pLinearSolver)
{
KRATOS_TRY
//PREREQUISITES
//allocate memory for variables
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//unknown and right-hand side vector
TSystemVectorType dp, rhs;
dp.resize(n_nodes);
rhs.resize(n_nodes);
array_1d<double, TDim> dU_i, dU_j, work_array;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
// NOTE(review): this conditional block is empty (leftover debug scaffolding)
#ifdef _OPENMP
// double time_inv = 0.0; //1.0/delta_t;
//read the pressure projection from the database
#endif
// pull current/old pressure, pressure projection and velocities from Kratos
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes());
mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes());
mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, rNodes);
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i = 0.0;
const double& p_i = mPn1[i_node];
const double& p_old_i = mPn[i_node];
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
array_1d<double, TDim>& xi_i = mXi[i_node];
// accumulator for the diagonal entry (negative row sum of off-diagonals)
double l_ii = 0.0;
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mPn1[j_neighbour];
const double& p_old_j = mPn[j_neighbour];
const array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
const array_1d<double, TDim>& xi_j = mXi[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
// edge tau: symmetric average of nodal taus when SYMM_PRESS is defined
#ifdef SYMM_PRESS
double edge_tau = 0.5 * (mTauPressure[i_node] + mTauPressure[j_neighbour]);
#else
double edge_tau = mTauPressure[i_node];
#endif
//compute laplacian operator
double sum_l_ikjk;
edge_ij.CalculateScalarLaplacian(sum_l_ikjk);
// sum_l_ikjk *= 2.0;
// split: dt-only part multiplies the old-pressure term, dt+tau the new one
double sum_l_ikjk_onlydt = sum_l_ikjk * (2.0*delta_t);
sum_l_ikjk *= (2.0*delta_t + edge_tau);
//assemble right-hand side
//pressure contribution
rhs_i -= sum_l_ikjk * (p_j - p_i);
rhs_i += sum_l_ikjk_onlydt * (p_old_j - p_old_i);
//calculating the divergence of the fract vel
edge_ij.Sub_D_v(rhs_i, U_i_curr*mRho, U_j_curr * mRho);
//high order stabilizing term
double temp = 0.0;
edge_ij.Add_div_v(temp, xi_i, xi_j);
rhs_i += edge_tau * temp;
//assemble laplacian matrix
mL(i_node, j_neighbour) = sum_l_ikjk;
l_ii -= sum_l_ikjk;
}
mL(i_node, i_node) = l_ii;
}
// optional mass-conservation correction: subtract the divergence error
// accumulated in SolveStep3 of the previous step
if(muse_mass_correction == true)
{
std::cout << "****************************************" << std::endl;
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i -= mdiv_error[i_node];
}
}
// //find the max diagonal term
// double max_diag = 0.0;
// for (int i_node = 0; i_node < n_nodes; i_node++) {
// double L_diag = mL(i_node, i_node);
// if (fabs(L_diag) > fabs(max_diag)) max_diag = L_diag;
// }
//respect pressure boundary conditions by penalization
double huge = 1e20;
for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
{
unsigned int i_node = mPressureOutletList[i_pressure];
mL(i_node, i_node) = huge;
rhs[i_node] = 0.0;
}
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mL(i_node, i_node) = max_diag;
// rhs[i_node] = 0.0;
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// mL(i_node, j_neighbour) = 0.0;
// }
// }
//set starting vector for iterative solvers
for (int i_node = 0; i_node < n_nodes; i_node++)
dp[i_node] = 0.0;
// symmetric diagonal scaling: D^{-1/2} L D^{-1/2} (D = |diag(L)|) to
// improve conditioning before handing the system to the linear solver
//compute row scaling factors
TSystemVectorType scaling_factors(n_nodes);
double* Lvalues = mL.value_data().begin();
SizeType* Lrow_indices = mL.index1_data().begin();
SizeType* Lcol_indices = mL.index2_data().begin();
for (SizeType k = 0; k < mL.size1(); k++)
{
double t = 0.0;
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
for (SizeType j=col_begin; j<col_end; j++)
if( Lcol_indices[j] == k)
{
t = fabs(Lvalues[j]);
}
// t += Lvalues[j]*Lvalues[j];
// t = sqrt(t);
// NOTE(review): if a row has no (or a zero) diagonal entry, t stays 0 and
// this divides by zero — confirm every row is guaranteed a diagonal here
scaling_factors[k] = 1.0/sqrt(t);
}
for (SizeType k = 0; k < mL.size1(); k++)
{
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
double k_factor = scaling_factors[k];
rhs[k] *= k_factor;
for (SizeType j=col_begin; j<col_end; j++)
{
Lvalues[j] *= scaling_factors[Lcol_indices[j]] * k_factor;
}
}
// double huge = 1e20;
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mL(i_node, i_node) = 1.0;
// rhs[i_node] = 0.0;
// }
// KRATOS_WATCH(norm_2(rhs));
// KRATOS_WATCH(norm_frobenius(mL));
pLinearSolver->Solve(mL, dp, rhs);
//apply inverse scaling to recover the unscaled pressure increment
for (unsigned int k = 0; k < dp.size(); k++)
dp[k] *= scaling_factors[k];
KRATOS_WATCH(*pLinearSolver)
//KRATOS_WATCH(norm_2(dp));
//update pressure
for (int i_node = 0; i_node < n_nodes; i_node++)
mPn1[i_node] += dp[i_node];
//write pressure and density to Kratos
mr_matrix_container.WriteScalarToDatabase(PRESSURE, mPn1, rNodes);
//compute pressure proj for the next step
#pragma omp parallel for private(work_array)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& xi_i = mXi[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
xi_i[comp] = 0.0;
const double& p_i = mPn1[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mPn1[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(xi_i, p_i, p_j);
}
// lump the projection: multiply by the inverted lumped mass
const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
xi_i[l_comp] *= m_inv;
}
mr_matrix_container.WriteVectorToDatabase(PRESS_PROJ, mXi, rNodes);
KRATOS_CATCH("")
}
//**********************************************************************************
//function to solve fluid equations - fractional step 3: correct fractional momentum
// Fractional step 3: corrects the fractional momentum with the pressure
// increment, i.e. u^{n+1} = u* - dt * M^{-1} G (dp/rho) * factor, then
// re-applies velocity BCs. If mass correction is enabled, it also stores
// the nodal divergence error of the corrected velocity into mdiv_error
// (consumed by SolveStep2 in the next time step).
void SolveStep3()
{
KRATOS_TRY
//get number of nodes
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//define work array
array_1d<double, TDim> correction;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
// factor 0.5 corresponds to the incremental scheme; 1.0 when the pressure
// increment is assumed constant over the step
double factor = 0.5;
if(massume_constant_dp == true)
factor = 1.0;
//compute end of step momentum
double rho_inv = 1.0 / mRho;
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv,factor)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
// kinematic pressure increment at node i
double delta_p_i = (mPn1[i_node] - mPn[i_node]) * rho_inv*factor;
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
//setting to zero
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
correction[l_comp] = 0.0;
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
double delta_p_j = (mPn1[j_neighbour] - mPn[j_neighbour]) * rho_inv*factor;
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_grad_p(correction, delta_p_i, delta_p_j);
}
//compute prefactor
double coefficient = delta_t * m_inv;
//correct fractional momentum
for (unsigned int comp = 0; comp < TDim; comp++)
U_i_curr[comp] += coefficient * correction[comp];
}
ApplyVelocityBC(mvel_n1);
//write velocity of time step n+1 to Kratos
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
//calculate the error on the divergence
if(muse_mass_correction == true)
{
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& div_i_err = mdiv_error[i_node];
div_i_err = 0.0;
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
// divergence of the momentum (velocity * density)
edge_ij.Add_D_v(div_i_err, U_i_curr*mRho, U_j_curr * mRho);
}
}
}
// KRATOS_WATCH("end of step3")
// double vnorm2 = 0.0;
// for( int i = 0; i< rNodes.size(); i++)
// vnorm2 += pow(mvel_n1[i][0],2) + pow(mvel_n1[i][1],2) + pow(mvel_n1[i][2],2);
// KRATOS_WATCH(sqrt(vnorm2));
KRATOS_CATCH("")
}
//************************************
// Enforces the velocity boundary conditions, in place, on VelArray:
//  - edge nodes: velocity projected onto the stored edge direction
//  - corner nodes: velocity zeroed
//  (the two above are skipped when the wall law is active)
//  - slip nodes: normal component removed (tangential velocity kept)
//  - fixed-velocity nodes: prescribed value imposed
void ApplyVelocityBC(CalcVectorType& VelArray)
{
KRATOS_TRY
if(mWallLawIsActive == false)
{
//apply conditions on corner edges
int edge_size = medge_nodes_direction.size();
#pragma omp parallel for firstprivate(edge_size)
for (int i = 0; i < edge_size; i++)
{
int i_node = medge_nodes[i];
const array_1d<double, TDim>& direction = medge_nodes_direction[i];
array_1d<double, TDim>& U_i = VelArray[i_node];
// keep only the velocity component along the edge direction
double temp=0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
temp += U_i[comp] * direction[comp];
for (unsigned int comp = 0; comp < TDim; comp++)
U_i[comp] = direction[comp]*temp;
}
//apply conditions on corners
int corner_size = mcorner_nodes.size();
for (int i = 0; i < corner_size; i++)
{
int i_node = mcorner_nodes[i];
array_1d<double, TDim>& U_i = VelArray[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
U_i[comp] = 0.0;
}
}
//slip condition
int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
array_1d<double, TDim>& U_i = VelArray[i_node];
array_1d<double, TDim>& an_i = mSlipNormal[i_node];
// project velocity on the (non-normalized) nodal normal
double projection_length = 0.0;
double normalization = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
projection_length += U_i[comp] * an_i[comp];
normalization += an_i[comp] * an_i[comp];
}
projection_length /= normalization;
//tangential momentum as difference between original and normal momentum
for (unsigned int comp = 0; comp < TDim; comp++)
U_i[comp] -= projection_length * an_i[comp];
}
//fixed condition
int fixed_size = mFixedVelocities.size();
#pragma omp parallel for firstprivate(fixed_size)
for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
{
unsigned int i_node = mFixedVelocities[i_velocity];
const array_1d<double, TDim>& u_i_fix = mFixedVelocitiesValues[i_velocity];
array_1d<double, TDim>& u_i = VelArray[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
u_i[comp] = u_i_fix[comp];
}
KRATOS_CATCH("")
}
//**************************************
//function to calculate the area normals
// Computes area normals for all boundary conditions (faces), then
// accumulates per-node slip normals (each face distributes 1/TDim of its
// normal to each of its nodes) for faces flagged IS_STRUCTURE, and finally
// rebuilds mSlipBoundaryList with the indices of all slip nodes.
void CalculateNormals(ModelPart::ConditionsContainerType& rConditions)
{
KRATOS_TRY
//calculate area normals face-by-face
array_1d<double, 3 > area_normal;
//2D case
if (TDim == 2)
{
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal2D(cond_it, area_normal);
}//3D case
else if (TDim == 3)
{
//help vectors for cross product
array_1d<double, 3 > v1;
array_1d<double, 3 > v2;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal3D(cond_it, area_normal, v1, v2);
}
//(re)initialize normals
unsigned int n_nodes = mNodalFlag.size();
mSlipNormal.resize(n_nodes);
std::vector<bool> is_slip(n_nodes);
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
noalias(mSlipNormal[i_node]) = ZeroVector(TDim);
is_slip[i_node] = false;
}
//loop over all faces
// each face node receives an equal share (1/TDim) of the face normal
const double node_factor = 1.0 / TDim;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
//slip condition
if (cond_it->GetValue(IS_STRUCTURE))
for (unsigned int if_node = 0; if_node < TDim; if_node++)
{
// AUX_INDEX maps the mesh node to its position in the edge-based arrays
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
array_1d<double, TDim>& slip_normal = mSlipNormal[i_node];
is_slip[i_node] = true;
for (unsigned int comp = 0; comp < TDim; comp++)
{
slip_normal[comp] += node_factor * face_normal[comp];
}
}
}
//fill the list of slip nodes
std::vector< unsigned int> tempmSlipBoundaryList;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (is_slip[i_node] == true)
tempmSlipBoundaryList.push_back(i_node);
}
mSlipBoundaryList.resize(tempmSlipBoundaryList.size(),false);
#pragma omp parallel for
for( int i=0; i<static_cast<int>(tempmSlipBoundaryList.size()); i++)
mSlipBoundaryList[i] = tempmSlipBoundaryList[i];
KRATOS_CATCH("")
}
//*******************************
//function to free dynamic memory
// Releases all internal storage (nodal work arrays, boundary-condition
// lists, stabilization parameters and the pressure system matrix) so the
// solver object can be reinitialized or destroyed without holding memory.
void Clear()
{
KRATOS_TRY
// velocity vectors and scratch storage
mWork.clear();
mvel_n.clear();
mvel_n1.clear();
// pressure at time steps n and n+1
mPn.clear();
mPn1.clear();
// mesh size measures
mHmin.clear();
mHavg.clear();
// boundary-condition data
mSlipNormal.clear();
mNodalFlag.clear();
mFixedVelocities.clear();
mFixedVelocitiesValues.clear();
mPressureOutletList.clear();
mSlipBoundaryList.clear();
// pressure system matrix
mL.clear();
// stabilization parameters and mass-correction storage
mTauPressure.clear();
mTauConvection.clear();
mTau2.clear();
mBeta.clear();
mdiv_error.clear();
KRATOS_CATCH("")
}
// Enables the wall-law treatment (used by the RHS assembly via
// ComputeWallResistance) and records the wall distance for the law.
void ActivateWallResistance(double Ywall)
{
mWallLawIsActive = true;
// NOTE(review): mY_wall is declared as bool in this revision, so assigning
// the double distance Ywall truncates it to 0/1 — confirm the member type.
mY_wall = Ywall;
}
// Computes the nodal pressure stabilization parameter and stores it in
// mTauPressure:
//   tau_i = 1 / ( 2|u_i|/h_avg_i + stab_factor/dt_avg + 4*nu/h_avg_i^2 )
// Reads the current VELOCITY from the Kratos database into mvel_n1.
// Fix: removed an 'rhs' vector that was allocated and resized but never used.
void ComputePressureStabilization()
{
KRATOS_TRY
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//read velocity data from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
//compute intrinsic time using the average time step size
double time_inv_avg = 1.0 / mdelta_t_avg;
double stabdt_pressure_factor = mstabdt_pressure_factor;
KRATOS_WATCH(stabdt_pressure_factor);
#pragma omp parallel for firstprivate(time_inv_avg,stabdt_pressure_factor)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_avg_i = mHavg[i_node];
array_1d<double, TDim>& a_i = mvel_n1[i_node];
const double nu_i = mViscosity;
double vel_norm = norm_2(a_i);
// convective, transient and viscous limits combined harmonically
double tau = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_pressure_factor * time_inv_avg + (4.0 * nu_i) / (h_avg_i * h_avg_i));
mTauPressure[i_node] = tau;
}
KRATOS_CATCH("");
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
// Applies an explicit viscous correction to the current velocity:
// assembles the (negative) viscous edge contributions into a local rhs,
// then updates mvel_n1 += delta_t * M^{-1} * rhs, re-applies velocity BCs
// and writes the corrected velocity back to the Kratos database.
void ViscosityCorrectionStep()
{
KRATOS_TRY
int n_nodes = mvel_n1.size();
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
CalcVectorType rhs;
rhs.resize(n_nodes);
//calculating the RHS
// double inverse_rho = 1.0 / mRho;
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
// start from a zero residual; only viscous terms are accumulated here
// double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = 0.0 ;
// accumulate viscous edge contributions over the CSR neighbours of i
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ViscousContribution(rhs_i, U_i, mViscosity, U_j, mViscosity);
}
}
//correcting the velocity
mr_matrix_container.Add_Minv_value(mvel_n1, mvel_n1, delta_t, mr_matrix_container.GetInvertedMass(), rhs);
ApplyVelocityBC(mvel_n1);
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
KRATOS_CATCH("")
}
// Computes the nodal viscous force field (M^{-1} times the viscous edge
// contributions of the current velocity) and writes it to the FORCE
// variable. At fixed-velocity nodes the force is overridden with
// body force + pressure projection. Requires the FORCE variable to be
// present in the nodal database.
void ComputeViscousForces()
{
KRATOS_TRY
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(FORCE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
int n_nodes = mvel_n1.size();
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, rNodes);
// ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
// double delta_t = CurrentProcessInfo[DELTA_TIME];
CalcVectorType rhs;
rhs.resize(n_nodes);
//calculating the RHS
// double inverse_rho = 1.0 / mRho;
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
// start from zero; only viscous terms are accumulated here
// double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = 0.0 ;
// viscous edge contributions over the CSR neighbours of node i
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ViscousContribution(rhs_i, U_i, mViscosity, U_j, mViscosity);
}
// convert to a nodal value via the inverted lumped mass
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
rhs_i[l_comp] *= m_inv;
}
// at fixed-velocity nodes report body force + pressure projection instead
int fixed_size = mFixedVelocities.size();
#pragma omp parallel for firstprivate(fixed_size)
for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
{
unsigned int i_node = mFixedVelocities[i_velocity];
array_1d<double, TDim>& rhs_i = rhs[i_node];
array_1d<double, TDim>& proj_i = mXi[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
rhs_i[l_comp] = mBodyForce[l_comp] + proj_i[l_comp];
}
mr_matrix_container.WriteVectorToDatabase(FORCE, rhs, rNodes);
KRATOS_CATCH("")
}
// Computes nodal reaction forces and writes them to the FORCE variable.
// Assembles the full momentum residual (body force, convection, pressure
// via Add_Gp, viscous terms, optionally convection stabilization and the
// wall law) plus the inertia term -M*(v^{n+1}-v^n)/dt.
//   exclude_convection_terms - when true, the convection STABILIZATION is
//     skipped. NOTE(review): Sub_ConvectiveContribution is still called in
//     that branch, so the convective term itself is NOT excluded — confirm
//     whether that matches the intended semantics of the flag.
// Requires the FORCE variable in the nodal database.
void ComputeReactions(bool exclude_convection_terms)
{
KRATOS_TRY
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(FORCE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
int n_nodes = mvel_n1.size();
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
CalcVectorType rhs;
rhs.resize(n_nodes);
mr_matrix_container.SetToZero(rhs);
//read velocity and pressure data from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes);
//calculating the RHS
array_1d<double, TDim> stab_low;
array_1d<double, TDim> stab_high;
// constant-viscosity model: same nu on both ends of each edge
const double nu_i = mViscosity;
const double nu_j = mViscosity;
double inverse_rho = 1.0 / mRho;
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
double dt_inv = 1.0/delta_t;
if(exclude_convection_terms == true)
{
// variant WITHOUT convection stabilization (see NOTE above about the
// convective term itself still being included)
#pragma omp parallel for private(stab_low,stab_high)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = mBodyForce;
array_1d<double, TDim> a_i = mvel_n1[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
//const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = mPn1[i_node];
//double edge_tau = mTauConvection[i_node];
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * f_i[comp] ;
//convective term
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
//const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = mPn1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, U_i, a_j, U_j);
edge_ij.Add_Gp(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
// edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
edge_ij.Sub_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
//add stabilization
// edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i, a_j, U_j);
// edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
// edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
}
}
}
else
{
// variant WITH convection stabilization
#pragma omp parallel for private(stab_low,stab_high)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = mBodyForce;
array_1d<double, TDim> a_i = mvel_n1[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = mPn1[i_node];
double edge_tau = mTauConvection[i_node];
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * f_i[comp] ;
//convective term
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = mPn1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, U_i, a_j, U_j);
edge_ij.Add_Gp(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
// edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
edge_ij.Sub_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
//add stabilization
edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i, a_j, U_j);
edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
}
}
}
//add inertia terms
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
array_1d<double, TDim>& v_i = mvel_n1[i_node];
array_1d<double, TDim>& vold_i = mvel_n[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
// backward-Euler inertia: -M*(v^{n+1} - v^n)/dt
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] -= m_i*dt_inv*(v_i[comp] - vold_i[comp]) ;
}
//apply wall resistance
if(mWallLawIsActive == true)
ComputeWallResistance(mvel_n1,rhs);
mr_matrix_container.WriteVectorToDatabase(FORCE, rhs, rNodes);
KRATOS_CATCH("");
}
private:
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;
bool muse_mass_correction;
//parameters controlling the wall law
bool mWallLawIsActive;
// FIX: the wall distance is set from a double in ActivateWallResistance;
// it was previously declared bool, which truncated the value to 0/1.
double mY_wall;
//parameters for controlling the usage of the delta time in the stabilization
double mstabdt_pressure_factor;
double mstabdt_convection_factor;
double medge_detection_angle;
double mtau2_factor;
bool massume_constant_dp;
//nodal values
//velocity vector U at time steps n and n+1
CalcVectorType mWork, mvel_n, mvel_n1, mx;
//pressure vector p at time steps n and n+1
ValuesVectorType mPn, mPn1;
//minimum length of the edges surrounding edges surrounding each nodal point
ValuesVectorType mHmin;
//average length of the edges surrounding each nodal point
ValuesVectorType mHavg;
CalcVectorType mEdgeDimensions;
//area normal
CalcVectorType mSlipNormal;
//projection terms
CalcVectorType mPi, mXi;
//flag for first time step
bool mFirstStep;
//flag to differentiate interior and boundary nodes
ValuesVectorType mNodalFlag;
//lists of nodes with different types of boundary conditions
IndicesVectorType mSlipBoundaryList, mPressureOutletList, mFixedVelocities;
CalcVectorType mFixedVelocitiesValues;
// ValuesVectorType mPressureOutlet;
//intrinsic time step size
ValuesVectorType mTauPressure;
ValuesVectorType mTauConvection;
ValuesVectorType mTau2;
//divergence error accumulated for the mass-correction option
ValuesVectorType mdiv_error;
//variables for resolving pressure equation
//laplacian matrix
TSystemMatrixType mL;
//constant variables
double mRho;
double mViscosity;
array_1d<double, TDim> mBodyForce;
//variables for convection
ValuesVectorType mBeta;
//variables for edge BCs
IndicesVectorType medge_nodes;
CalcVectorType medge_nodes_direction;
IndicesVectorType mcorner_nodes;
double mdelta_t_avg;
double max_dt;
//***********************************************************
//functions to calculate area normals for boundary conditions
// Area normal of a 2D boundary condition (an edge): rotate the edge vector
// p0->p1 by -90 degrees, so the normal's length equals the edge length.
// The result is returned in area_normal and also stored on the condition.
void CalculateNormal2D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal)
{
Geometry<Node < 3 > >& geom = cond_it->GetGeometry();
const double dx = geom[1].X() - geom[0].X();
const double dy = geom[1].Y() - geom[0].Y();
area_normal[0] = dy;
area_normal[1] = -dx;
area_normal[2] = 0.00;
noalias(cond_it->GetValue(NORMAL)) = area_normal;
}
// Area normal of a 3D boundary condition (a triangle): the edge vectors
// p0->p1 and p0->p2 span the face; half their cross product gives the
// area-weighted normal, with -0.5 fixing the orientation convention.
// v1/v2 are caller-provided scratch arrays (overwritten). The result is
// returned in area_normal and also stored on the condition.
void CalculateNormal3D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal, array_1d<double, 3 > & v1, array_1d<double, 3 > & v2)
{
Geometry<Node < 3 > >& geom = cond_it->GetGeometry();
Node < 3 > & p0 = geom[0];
Node < 3 > & p1 = geom[1];
Node < 3 > & p2 = geom[2];
// first spanning edge: p0 -> p1
v1[0] = p1.X() - p0.X();
v1[1] = p1.Y() - p0.Y();
v1[2] = p1.Z() - p0.Z();
// second spanning edge: p0 -> p2
v2[0] = p2.X() - p0.X();
v2[1] = p2.Y() - p0.Y();
v2[2] = p2.Z() - p0.Z();
MathUtils<double>::CrossProduct(area_normal, v1, v2);
area_normal *= -0.5;
noalias(cond_it->GetValue(NORMAL)) = area_normal;
}
//*********************************************************
//function to calculate minimum length of surrounding edges
// Computes per-node mesh size measures and per-edge geometry:
//  - mHmin: minimum surrounding edge length (taken from the matrix container)
//  - mHavg: average element size derived from the lumped mass
//    (sqrt(2*m) in 2D, cbrt(6*m) in 3D)
//  - mEdgeDimensions: the coordinate difference pos_i - pos_j for each edge
void CalculateEdgeLengths(ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//get number of nodes
unsigned int n_nodes = rNodes.size();
//reserve memory for storage of nodal coordinates
std::vector< array_1d<double, 3 > > position;
position.resize(n_nodes);
//get position of all nodes
for (typename ModelPart::NodesContainerType::iterator node_it = rNodes.begin(); node_it != rNodes.end(); node_it++)
{
//get the global index of the node
unsigned int i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));
//save its coordinates locally
noalias(position[i_node]) = node_it->Coordinates();
//initialize minimum edge length with relatively big values
// (immediately overwritten by the container's precomputed Hmin below)
mHmin[i_node] = 1e10;
}
// copy the minimum edge lengths precomputed by the matrix container
ValuesVectorType& aaa = mr_matrix_container.GetHmin();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
mHmin[i_node] = aaa[i_node];
}
//take unstructured meshes into account
if (TDim == 2)
{
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHavg[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
// double& rho_i = mRho[i_node];
// 2D: edge of a square of area m (assumes the lumped mass is an area)
h_i = sqrt(2.0 * m_i);
}
}
else if (TDim == 3)
{
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHavg[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
// double& rho_i = mRho[i_node];
// 3D: edge of a cube of volume m (assumes the lumped mass is a volume)
h_i = pow(6.0 * m_i, 1.0 / 3.0);
}
}
//compute edge coordinates
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, 3 > & pos_i = position[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, 3 > & pos_j = position[j_neighbour];
array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
// edge vector from node j to node i (first TDim components only)
for (unsigned int comp = 0; comp < TDim; comp++)
l_k[comp] = pos_i[comp] - pos_j[comp];
}
}
KRATOS_CATCH("")
}
//**************************************
// Decides whether the edge i1-i2 shared by the current face and its
// neighbour (neighb[neighb_index]) is a geometric feature edge: if the two
// face normals deviate by more than 45 deg, the normalized edge direction
// is accumulated on both end nodes (with a sign flip so successive
// contributions do not cancel) and the per-node hit counter is incremented.
// The edge is processed only from the face where Id(i1) < Id(i2), so each
// face pair contributes exactly once.
void CornerDectectionHelper(Geometry< Node < 3 > >& face_geometry,
                            const array_1d<double, 3 > & face_normal,
                            const double An,
                            const WeakPointerVector<Condition>& neighb,
                            const unsigned int i1,
                            const unsigned int i2,
                            const unsigned int neighb_index,
                            std::vector<unsigned int>& edge_nodes,
                            CalcVectorType& cornern_list
                           )
{
    // 45 deg threshold. FIX: the original used 3.1 as an approximation of
    // pi, which silently lowered the threshold to ~44.4 deg.
    const double acceptable_angle = 45.0 / 180.0 * 3.14159265358979323846; //angles of less than 45 deg will be accepted
    const double acceptable_cos = cos(acceptable_angle);
    if (face_geometry[i1].Id() < face_geometry[i2].Id()) //process each shared edge from one side only
    {
        const array_1d<double, 3 > & neighb_normal = neighb[neighb_index].GetValue(NORMAL);
        double neighb_An = norm_2(neighb_normal);
        double cos_normal = 1.0 / (An * neighb_An) * inner_prod(face_normal, neighb_normal);
        //if the angle between the two normals is too big the edge in the middle is a feature edge
        if (cos_normal < acceptable_cos)
        {
            array_1d<double, TDim > edge;
            for (unsigned int i = 0; i < TDim; i++)
                edge[i] = face_geometry[i2].Coordinates()[i] - face_geometry[i1].Coordinates()[i];
            double temp = norm_2(edge);
            edge /= temp;
            int index1 = face_geometry[i1].FastGetSolutionStepValue(AUX_INDEX);
            int index2 = face_geometry[i2].FastGetSolutionStepValue(AUX_INDEX);
            edge_nodes[index1] += 1;
            edge_nodes[index2] += 1;
            //accumulate the edge direction with a sign chosen so that
            //contributions from different faces add up coherently
            double sign1 = inner_prod(cornern_list[index1], edge);
            if (sign1 >= 0)
                cornern_list[index1] += edge;
            else
                cornern_list[index1] -= edge;
            double sign2 = inner_prod(cornern_list[index2], edge);
            if (sign2 >= 0)
                cornern_list[index2] += edge;
            else
                cornern_list[index2] -= edge;
        }
    }
}
//function to calculate the area normals
// Detects feature edges and corners of the slip boundary (3D).
// Every pair of adjacent slip ("IS_STRUCTURE") faces whose normals deviate
// by more than ~45 deg marks its shared edge on both end nodes
// (CornerDectectionHelper). A node hit exactly twice lies on a single
// feature edge and is stored in medge_nodes together with its normalized
// edge direction; a node hit more often is a corner (mcorner_nodes).
void DetectEdges3D(ModelPart::ConditionsContainerType& rConditions)
{
    KRATOS_TRY
    //(re)initialize per-node feature-edge counters and direction accumulators
    unsigned int n_nodes = mNodalFlag.size();
    std::vector<unsigned int> temp_edge_nodes(n_nodes);
    CalcVectorType temp_cornern_list(n_nodes);
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        temp_edge_nodes[i_node] = 0; //FIX: counter is an unsigned int (was assigned 0.0)
        noalias(temp_cornern_list[i_node]) = ZeroVector(TDim);
    }
    //loop over all faces
    for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
    {
        //get geometry data of the face
        Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
        //reference for area normal of the face
        const array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
        double An = norm_2(face_normal);
        unsigned int current_id = cond_it->Id();
        //slip condition
        if (cond_it->GetValue(IS_STRUCTURE) == 1.0) //this is a slip face --> now look for its neighbours
        {
            const WeakPointerVector<Condition>& neighb = cond_it->GetValue(NEIGHBOUR_CONDITIONS);
            //a neighbour entry equal to the condition itself flags "no neighbour"
            if (neighb[0].Id() != current_id)
                CornerDectectionHelper(face_geometry, face_normal, An, neighb, 1, 2, 0, temp_edge_nodes, temp_cornern_list);
            if (neighb[1].Id() != current_id)
                CornerDectectionHelper(face_geometry, face_normal, An, neighb, 2, 0, 1, temp_edge_nodes, temp_cornern_list);
            if (neighb[2].Id() != current_id)
                CornerDectectionHelper(face_geometry, face_normal, An, neighb, 0, 1, 2, temp_edge_nodes, temp_cornern_list);
        }
    }
    //fill the lists of edge nodes and corner nodes
    std::vector<unsigned int> tempmedge_nodes;
    std::vector< array_1d<double,TDim> > tempmedge_nodes_direction;
    std::vector<unsigned int> tempmcorner_nodes;
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        if (temp_edge_nodes[i_node] == 2) //node lies on exactly one feature edge
        {
            tempmedge_nodes.push_back(i_node);
            array_1d<double, TDim>& node_edge = temp_cornern_list[i_node];
            node_edge /= norm_2(node_edge);
            tempmedge_nodes_direction.push_back(node_edge);
        }
        else if (temp_edge_nodes[i_node] > 2) //several feature edges meet here
            tempmcorner_nodes.push_back(i_node);
    }
    medge_nodes.resize(tempmedge_nodes.size(),false);
    medge_nodes_direction.resize(tempmedge_nodes_direction.size(),false);
    mcorner_nodes.resize(tempmcorner_nodes.size(),false);
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(tempmedge_nodes.size()); i++)
    {
        medge_nodes[i] = tempmedge_nodes[i];
        medge_nodes_direction[i] = tempmedge_nodes_direction[i];
    }
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(tempmcorner_nodes.size()); i++)
    {
        mcorner_nodes[i] = tempmcorner_nodes[i];
    }
    //debug output: list the detected corner nodes
    for (unsigned int i = 0; i < mcorner_nodes.size(); i++)
    {
        KRATOS_WATCH(mcorner_nodes[i]);
    }
    KRATOS_CATCH("")
}
// Adds the wall-friction contribution of a log-law wall function to the
// momentum RHS of every slip-boundary node.
// Model: u+ = (1/k) ln(y+) + B. Below the y+ intercept the viscous-sublayer
// estimate u_tau = sqrt(|u| mu / y_wall) is kept; above it, u_tau is found
// by Newton iteration on f(u_tau) = u_tau ((1/k) ln(y_wall u_tau / mu) + B) - |u|.
// The resulting traction (density * u_tau^2 * area) is applied opposite to
// the nodal velocity direction.
void ComputeWallResistance(
const CalcVectorType& vel,
CalcVectorType& rhs
)
{
//parameters:
double k = 0.41; //von Karman constant
double B = 5.1; //log-law additive constant
double density = mRho;
double mu = mViscosity;
double toll = 1e-6; //relative tolerance of the Newton iteration
double ym = mY_wall; //wall distance used by the wall law //0.0825877; //0.0093823
double y_plus_incercept = 10.9931899; //y+ where linear and log profiles intersect
unsigned int itmax = 100;
if (mu == 0)
KRATOS_THROW_ERROR(std::logic_error, "it is not possible to use the wall law with 0 viscosity", "");
//slip condition
//NOTE(review): k is read-only inside the parallel region and deliberately
//left shared (it is not in the firstprivate list).
int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size,B,density,mu,toll,ym,y_plus_incercept,itmax)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
const array_1d<double, TDim>& an_i = mSlipNormal[i_node];
//compute the modulus of the velocity and the boundary area (norm of the
//stored area normal)
double mod_vel = 0.0;
double area = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
mod_vel += U_i[comp] * U_i[comp];
area += an_i[comp] * an_i[comp];
}
mod_vel = sqrt(mod_vel);
area = sqrt(area);
//initial guess for the friction velocity from the viscous sublayer
double mod_uthaw = sqrt(mod_vel * mu / ym);
const double y_plus = ym * mod_uthaw / mu;
if (y_plus > y_plus_incercept)
{
//Newton iteration for u_tau in the logarithmic region
unsigned int it = 0;
double dx = 1e10;
// KRATOS_WATCH(fabs(dx));
while (fabs(dx) > toll * mod_uthaw && it < itmax)
{
double a = 1.0 / k;
double temp = a * log(ym * mod_uthaw / mu) + B;
double y = mod_uthaw * (temp) - mod_vel; //residual f(u_tau)
double y1 = temp + a; //derivative f'(u_tau)
dx = y / y1;
mod_uthaw -= dx;
it = it + 1;
}
// KRATOS_WATCH(toll*mod_uthaw);
// KRATOS_WATCH(area);
// KRATOS_WATCH(it);
if (it == itmax)
std::cout << "attention max number of iterations exceeded in wall law computation" << std::endl;
}
// else
// {
// for (unsigned int comp = 0; comp < TDim; comp++)
// rhs_i[comp] -= U_i[comp] * area * mu / (density*ym) ;
// }
//apply the wall shear stress opposite to the velocity direction;
//skip (numerically) stagnant nodes to avoid dividing by ~0
if (mod_vel > 1e-12)
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] -= U_i[comp] * area * mod_uthaw * mod_uthaw * density / (mod_vel);
}
}
};
} //namespace Kratos
//#undef SYMM_PRESS
#endif //KRATOS_EDGEBASED_FLUID_SOLVER_H_INCLUDED defined
|
reduction_modifier.c |
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Parser/AST smoke test for OpenMP 5.x reduction-clause modifiers
   (inscan, task, default). Per the printf text, the directives only need
   to be syntactically valid; semantic legality (e.g. inscan without a
   matching scan directive, or a variable in two reduction clauses) is
   intentionally not exercised. Do not "fix" the pragmas. */
int main()
{
int a, b, c;
/* inscan modifier combined with an unmodified reduction clause */
#pragma omp parallel reduction (inscan, +:a, b) reduction (-:a, c)
{
printf("This is for testing parser and AST construction, which could be only syntax correct.\n");
}
/* task modifier on both clauses */
#pragma omp parallel reduction (task, +:a, b) reduction (task, +:a, c)
{
printf("This is for testing parser and AST construction, which could be only syntax correct.\n");
}
/* default modifier mixed with inscan */
#pragma omp parallel reduction (default, -:a, b) reduction (inscan, -:a, c)
{
printf("This is for testing parser and AST construction, which could be only syntax correct.\n");
}
return 0;
}
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*blur_view,
*edge_view,
*image_view;
double
normalize,
**kernel;
Image
*blur_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
width;
ssize_t
w,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
/*
A sigma of ~0 means no blurring at all: return the plain clone.
*/
if (fabs(sigma) < MagickEpsilon)
return(blur_image);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, blur, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image,exception);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image,exception);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
kernel[w] is a normalized (width-w) x (width-w) Gaussian; only even
indices are populated, matching the forced-even kernel index j below.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(kernel,0,(size_t) width*sizeof(*kernel));
for (w=0; w < (ssize_t) width; w+=2)
{
ssize_t
j,
k,
u,
v;
kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
(size_t) (width-w),(width-w)*sizeof(**kernel)));
if (kernel[w] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-w-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[w][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[w][k];
k++;
}
}
/*
Fold any normalization residue into the center tap so the kernel sums
to exactly 1.0.
*/
kernel[w][(k-1)/2]+=(double) (1.0-normalize);
if (sigma < MagickEpsilon)
kernel[w][(k-1)/2]=1.0;
}
if (w < (ssize_t) width)
{
/*
Partial kernel allocation failed: unwind the ones already acquired.
*/
for (w-=2; w >= 0; w-=2)
kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively blur image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
const Quantum
*magick_restrict r;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
const Quantum
*magick_restrict p;
ssize_t
i;
ssize_t
center,
j;
/*
Map the local edge intensity to a kernel index j (clamped to
[0,width], forced even); kernel[j] has size (width-j) x (width-j),
so j controls how wide a neighborhood this pixel is convolved with.
*/
j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5));
if (j < 0)
j=0;
else
if (j > (ssize_t) width)
j=(ssize_t) width;
if ((j & 0x01) != 0)
j--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
(ssize_t) ((width-j)/2L),width-j,width-j,exception);
if (p == (const Quantum *) NULL)
break;
/*
Offset of the neighborhood's central pixel within p.
*/
center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
GetPixelChannels(image)*((width-j)/2);
for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
const double
*magick_restrict k;
const Quantum
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
k=kernel[j];
pixels=p;
pixel=0.0;
gamma=0.0;
if ((blur_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(blur_image);
r+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (w=0; w < (ssize_t) width; w+=2)
kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*sharp_view,
*edge_view,
*image_view;
double
normalize,
**kernel;
Image
*sharp_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
width;
ssize_t
w,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sharp_image=CloneImage(image,0,0,MagickTrue,exception);
if (sharp_image == (Image *) NULL)
return((Image *) NULL);
/*
A sigma of ~0 means no sharpening at all: return the plain clone.
*/
if (fabs(sigma) < MagickEpsilon)
return(sharp_image);
if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, sharp, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image,exception);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image,exception);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
kernel[w] is a (width-w) x (width-w) negative Gaussian with the center
tap set to -2x the (negative) sum: a sharpening kernel. Only even
indices are populated, matching the forced-even kernel index j below.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
width,sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(kernel,0,(size_t) width*sizeof(*kernel));
for (w=0; w < (ssize_t) width; w+=2)
{
ssize_t
j,
k,
u,
v;
kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
(width-w),(width-w)*sizeof(**kernel)));
if (kernel[w] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-w-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[w][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[w][k];
k++;
}
}
kernel[w][(k-1)/2]=(double) ((-2.0)*normalize);
if (sigma < MagickEpsilon)
kernel[w][(k-1)/2]=1.0;
}
if (w < (ssize_t) width)
{
/*
Partial kernel allocation failed: unwind the ones already acquired.
*/
for (w-=2; w >= 0; w-=2)
kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively sharpen image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
for (y=0; y < (ssize_t) sharp_image->rows; y++)
{
const Quantum
*magick_restrict r;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
exception);
if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) sharp_image->columns; x++)
{
const Quantum
*magick_restrict p;
ssize_t
i;
ssize_t
center,
j;
/*
Map the local edge intensity to a kernel index j (clamped to
[0,width], forced even); kernel[j] has size (width-j) x (width-j),
so j controls the neighborhood this pixel is convolved with.
*/
j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5));
if (j < 0)
j=0;
else
if (j > (ssize_t) width)
j=(ssize_t) width;
if ((j & 0x01) != 0)
j--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
(ssize_t) ((width-j)/2L),width-j,width-j,exception);
if (p == (const Quantum *) NULL)
break;
/*
Offset of the neighborhood's central pixel within p.
*/
center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
GetPixelChannels(image)*((width-j)/2);
for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
sharp_traits,
traits;
const double
*magick_restrict k;
const Quantum
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
sharp_traits=GetPixelChannelTraits(sharp_image,channel);
if ((traits == UndefinedPixelTrait) ||
(sharp_traits == UndefinedPixelTrait))
continue;
if ((sharp_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(sharp_image,channel,p[center+i],q);
continue;
}
k=kernel[j];
pixels=p;
pixel=0.0;
gamma=0.0;
if ((sharp_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(sharp_image);
r+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sharp_image->type=image->type;
sharp_view=DestroyCacheView(sharp_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (w=0; w < (ssize_t) width; w+=2)
kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
sharp_image=DestroyImage(sharp_image);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Blur the image by convolving with two 1-D Gaussian kernels (horizontal,
  then the same kernel rotated +90 degrees).  When OpenCL support is
  compiled in, the accelerated path is tried first.  Returns a new image
  or NULL on failure (errors reported via exception).
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *blur_kernel;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  blur_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (blur_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,blur_kernel,exception);
  blur_kernel=DestroyKernelInfo(blur_kernel);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l a t e r a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilateralBlurImage() is a non-linear, edge-preserving, and noise-reducing
% smoothing filter for images. It replaces the intensity of each pixel with
% a weighted average of intensity values from nearby pixels. This weight is
% based on a Gaussian distribution. The weights depend not only on Euclidean
% distance of pixels, but also on the radiometric differences (e.g., range
% differences, such as color intensity, depth distance, etc.). This preserves
% sharp edges.
%
% The format of the BilateralBlurImage method is:
%
% Image *BilateralBlurImage(const Image *image,const size_t width,
% const size_t height,const double intensity_sigma,
% const double spatial_sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the neighborhood in pixels.
%
% o height: the height of the neighborhood in pixels.
%
% o intensity_sigma: sigma in the intensity space. A larger value means
% that farther colors within the pixel neighborhood (see spatial_sigma)
% will be mixed together, resulting in larger areas of semi-equal color.
%
% o spatial_sigma: sigma in the coordinate space. A larger value means that
% farther pixels influence each other as long as their colors are close
% enough (see intensity_sigma ). When the neigborhood diameter is greater
% than zero, it specifies the neighborhood size regardless of
% spatial_sigma. Otherwise, the neigborhood diameter is proportional to
% spatial_sigma.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double BlurDistance(const ssize_t x,const ssize_t y,
const ssize_t u,const ssize_t v)
{
return(sqrt(((double) x-u)*((double) x-u)+((double) y-v)*((double) y-v)));
}
/*
  Gaussian weight exp(-x^2/(2*sigma^2)) / (2*PI*sigma^2) at offset x;
  PerceptibleReciprocal() guards against a vanishing sigma.
*/
static inline double BlurGaussian(const double x,const double sigma)
{
  const double
    exponent_scale = PerceptibleReciprocal(2.0*sigma*sigma),
    normalization = PerceptibleReciprocal(Magick2PI*sigma*sigma);

  return(exp(-((double) x*x)*exponent_scale)*normalization);
}
/*
  Release the weight buffers of the bilateral-blur thread set (slots
  0..number_threads inclusive; the extra slot holds the shared spatial
  Gaussian) and then the pointer table itself.  NULL slots from a partial
  allocation are skipped.  Returns the relinquished (NULL) table pointer.
*/
static double **DestroyBilateralThreadSet(const ssize_t number_threads,
  double **weights)
{
  ssize_t
    n;

  assert(weights != (double **) NULL);
  for (n=0; n <= (ssize_t) number_threads; n++)
  {
    if (weights[n] == (double *) NULL)
      continue;
    weights[n]=(double *) RelinquishMagickMemory(weights[n]);
  }
  return((double **) RelinquishMagickMemory(weights));
}
/*
  Allocate number_threads+1 width*height weight buffers: one per worker
  thread plus a final shared slot (index number_threads) that later holds
  the precomputed spatial Gaussian.  Returns NULL on failure (partially
  acquired buffers are released via DestroyBilateralThreadSet()).
*/
static double **AcquireBilateralThreadSet(const size_t number_threads,
  const size_t width,const size_t height)
{
  double
    **weights;

  ssize_t
    i;

  weights=(double **) AcquireQuantumMemory(number_threads+1,sizeof(*weights));
  if (weights == (double **) NULL)
    return((double **) NULL);
  /*
    FIX: zero all number_threads+1 slots.  The original cleared only
    number_threads entries, leaving the last pointer indeterminate; on a
    subsequent partial allocation failure DestroyBilateralThreadSet() would
    read -- and free -- that uninitialized pointer.
  */
  (void) memset(weights,0,(number_threads+1)*sizeof(*weights));
  for (i=0; i <= (ssize_t) number_threads; i++)
  {
    weights[i]=(double *) AcquireQuantumMemory(width,height*sizeof(**weights));
    if (weights[i] == (double *) NULL)
      return(DestroyBilateralThreadSet(number_threads,weights));
  }
  return(weights);
}
MagickExport Image *BilateralBlurImage(const Image *image,const size_t width,
  const size_t height,const double intensity_sigma,const double spatial_sigma,
  ExceptionInfo *exception)
{
#define MaxIntensity (255)
#define BilateralBlurImageTag "Blur/Image"

  CacheView
    *blur_view,
    *image_view;

  double
    intensity_gaussian[2*(MaxIntensity+1)],
    *spatial_gaussian,
    **weights;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    mid;

  ssize_t
    number_threads,
    w,
    y;

  /*
    Edge-preserving blur: each output pixel is a weighted mean of its
    width x height neighborhood, where a neighbor's weight is the product of
    a spatial Gaussian (distance from the center) and an intensity Gaussian
    (difference in intensity from the center).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  weights=AcquireBilateralThreadSet((size_t) number_threads,width,height);
  if (weights == (double **) NULL)
    {
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Pre-compute the intensity Gaussian for every possible intensity
    difference.  Lookups below index the table with intensity+MaxIntensity
    for intensity in [-MaxIntensity,MaxIntensity], so the loop must include
    w == MaxIntensity (the previous "w < MaxIntensity" bound left the last
    referenced entry uninitialized).
  */
  for (w=(-MaxIntensity); w <= MaxIntensity; w++)
    intensity_gaussian[w+MaxIntensity]=BlurGaussian((double) w,intensity_sigma);
  /*
    Pre-compute the spatial Gaussian in the extra (shared) thread slot.
  */
  spatial_gaussian=weights[number_threads];
  {
    ssize_t
      n,
      v;

    n=0;
    mid.x=(ssize_t) (width/2L);
    mid.y=(ssize_t) (height/2L);
    for (v=0; v < (ssize_t) height; v++)
    {
      ssize_t
        u;

      for (u=0; u < (ssize_t) width; u++)
        spatial_gaussian[n++]=BlurGaussian(BlurDistance(0,0,u-mid.x,v-mid.y),
          spatial_sigma);
    }
  }
  /*
    Bilateral blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      double
        gamma,
        pixel;

      const Quantum
        *magick_restrict p,
        *magick_restrict r;

      ssize_t
        i,
        u;

      ssize_t
        n,
        v;

      /*
        Tonal weighting preserves edges while smoothing in the flat regions.
      */
      p=GetCacheViewVirtualPixels(image_view,x-mid.x,y-mid.y,width,height,
        exception);
      if (p == (const Quantum *) NULL)
        break;
      p+=(ssize_t) GetPixelChannels(image)*width*mid.y+GetPixelChannels(image)*
        mid.x;
      n=0;
      for (v=0; v < (ssize_t) height; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          double
            intensity;

          r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) width*(mid.y-v)+
            GetPixelChannels(image)*(mid.x-u);
          intensity=ScaleQuantumToChar(GetPixelIntensity(image,r))-
            (double) ScaleQuantumToChar(GetPixelIntensity(image,p));
          /*
            Use the pre-computed table when the difference is in range;
            otherwise fall back to evaluating the Gaussians directly.
          */
          if ((intensity >= -MaxIntensity) && (intensity <= MaxIntensity))
            weights[id][n]=intensity_gaussian[(ssize_t) intensity+MaxIntensity]*
              spatial_gaussian[n];
          else
            weights[id][n]=BlurGaussian(intensity,intensity_sigma)*
              BlurGaussian(BlurDistance(x,y,x+u-mid.x,y+v-mid.y),spatial_sigma);
          n++;
        }
      }
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        pixel=0.0;
        gamma=0.0;
        n=0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) height; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+
                  GetPixelChannels(image)*(mid.x-u);
                pixel+=weights[id][n]*r[i];
                gamma+=weights[id][n];
                n++;
              }
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(
              PerceptibleReciprocal(gamma)*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        for (v=0; v < (ssize_t) height; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            double
              alpha,
              beta;

            r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+
              GetPixelChannels(image)*(mid.x-u);
            alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
            beta=(double) (QuantumScale*GetPixelAlpha(image,r));
            pixel+=weights[id][n]*r[i];
            gamma+=weights[id][n]*alpha*beta;
            n++;
          }
        }
        SetPixelChannel(blur_image,channel,ClampToQuantum(
          PerceptibleReciprocal(gamma)*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BilateralBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  weights=DestroyBilateralThreadSet(number_threads,weights);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  {
    Image
      *accelerated_image;

    /*
      Prefer the OpenCL-accelerated path when it is available and succeeds.
    */
    accelerated_image=AccelerateConvolveImage(image,kernel_info,exception);
    if (accelerated_image != (Image *) NULL)
      return(accelerated_image);
  }
#endif
  /*
    Fall back to the generic morphology engine with a single convolve pass.
  */
  return(MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  /*
    One hulling step of the Crimmins speckle-removal algorithm.  f and g are
    (columns+2) x (rows+2) pixel planes with a one-pixel border; (x_offset,
    y_offset) selects the neighbor direction.  With polarity > 0 a pixel is
    raised by one scaled unit when the neighbor is sufficiently brighter;
    otherwise it is lowered when the neighbor is sufficiently darker.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /*
    First pass: compare each pixel in f against its (x_offset,y_offset)
    neighbor and write the nudged value to g.  The +(columns+2) skips the
    top border row.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    ssize_t
      i,
      x;

    /* (2*y+1)+y*columns skips the left/right border cells of each row */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second pass: compare g against both the offset neighbor (r) and its
    opposite (s) and write the result back to f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  ssize_t
    i;

  size_t
    length;

  /* the four hull directions; their negations are applied explicitly below */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Both planes carry a one-pixel border so Hull()
    can address neighbors without bounds checks.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy this channel into the bordered pixel plane; the j increments
      before/after each row skip the border cells.
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    /*
      Complementary hulling: raise (+1 polarity) then lower (-1 polarity)
      along each offset direction and its opposite.
    */
    (void) memset(buffer,0,length*sizeof(*buffer));
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Write the filtered channel back to the despeckled image.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  ssize_t
    cell,
    count;

  size_t
    width;

  /*
    Edge detection: convolve with a width x width kernel of -1's whose
    center weight (width*height-1) makes the kernel sum to zero.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  count=(ssize_t) (kernel_info->width*kernel_info->height);
  for (cell=0; cell < count; cell++)
    kernel_info->values[cell]=(-1.0);
  kernel_info->values[count/2]=(double) kernel_info->width*
    kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  /*
    Build a width x width emboss kernel from signed Gaussian weights
    (negative in the upper/left half, positive in the lower/right half),
    normalize it, convolve, and equalize the result.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      /*
        Signed Gaussian weight; k decreases by one per row, so only entries
        with u == k survive — a single diagonal of non-zero weights.
      */
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Scale the kernel by the safe reciprocal of its weight sum.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
% The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *kernel_info;

  /*
    A Gaussian blur is plain convolution with a "gaussian:RADIUSxSIGMA"
    kernel built by AcquireKernelInfo().
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  kernel_info=AcquireKernelInfo(kernel_geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  double
    blue,
    green,
    red;

  /*
    Rec. 709 luma coefficients applied to the pixel's RGB components, read
    through the image's channel map.
  */
  red=pixel[image->channel_map[RedPixelChannel].offset];
  green=pixel[image->channel_map[GreenPixelChannel].offset];
  blue=pixel[image->channel_map[BluePixelChannel].offset];
  return(0.212656f*red+0.715158f*green+0.072186f*blue);
}
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=(size_t) radius+1;
  /*
    Operate on a Gaussian-smoothed copy of the input.
  */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      size_t
        i;

      /*
        Of the four width x width quadrants touching this pixel, find the
        one with the smallest luma variance.
      */
      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        ssize_t
          n;

        ssize_t
          j;

        /*
          Position the quadrant: case 0 shifts up-left, case 1 up, case 2
          left, case 3 leaves it anchored at (x,y).
        */
        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /*
          Per-channel mean over the quadrant.
        */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /*
          Luma variance of the quadrant about that mean.
        */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /* i < 4 means a quadrant read failed above */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /*
        Replace the pixel with an interpolation at the center of the least
        varying quadrant.
      */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanline,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanline_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    radius is a percentage: 100% maps to 0.2 (100*0.002) of the largest
    image dimension.  Scanlines carry width of padding at each end; one
    scanline is allocated per OpenMP thread.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanline_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanline));
  if (scanline_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanline=(float *) GetVirtualMemoryBlob(scanline_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanline_info=RelinquishVirtualMemory(scanline_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: blur each column's luma with a triangular weight ramp
    (1..width..1) and store the result in interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /* cache this column's luma, including the padded ends */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+y;
        /* ascending half of the triangular weights */
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        /* descending half */
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur interImage rows the same way, then scale each
    updatable RGB channel by the unsharp-style contrast multiplier.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelRed(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelGreen(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelBlue(image,p)*mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanline_info=RelinquishVirtualMemory(scanline_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    sum;

  ssize_t
    u;

  /*
    Build a normalized 1-D Gaussian convolution kernel of the given width;
    returns NULL when the aligned allocation fails.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return((MagickRealType *) NULL);
  /*
    Accumulate the raw Gaussian weights, then rescale so they sum to one.
  */
  sum=0.0;
  for (u=0; u < (ssize_t) width; u++)
  {
    kernel[u]=(MagickRealType) (exp((-((double) u*u)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    sum+=kernel[u];
  }
  for (u=0; u < (ssize_t) width; u++)
    kernel[u]/=sum;
  return(kernel);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  size_t
    width;

  ssize_t
    w,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Build the 1-D Gaussian weights and, for each kernel tap w, the integer
    (x,y) pixel offset along the motion direction given by angle.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (w=0; w < (ssize_t) width; w++)
  {
    /*
      Project tap w onto the unit motion vector; ceil(v-0.5) rounds to the
      nearest integer pixel offset.
    */
    offset[w].x=CastDoubleToLong(ceil((double) (w*point.y)/
      hypot(point.x,point.y)-0.5));
    offset[w].y=CastDoubleToLong(ceil((double) (w*point.x)/
      hypot(point.x,point.y)-0.5));
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it succeeds. */
  blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
  if (blur_image != (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return(blur_image);
    }
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* A failure on any row cancels the remaining rows. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const Quantum
          *magick_restrict r;

        MagickRealType
          *magick_restrict k;

        ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /* Channel is copy-only: transfer the source sample unchanged. */
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Non-blended channel: plain weighted sum of the samples along
              the motion path.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  /*
                    NOTE(review): this continue skips the k++ below, so the
                    remaining taps use shifted kernel weights — confirm
                    whether a break (or k++ before continue) was intended.
                  */
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha-blended channel: weight each sample by its alpha and
          renormalize by the accumulated coverage (gamma).
        */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              /* NOTE(review): same k-desync concern as above on failure. */
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters.  This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImages method is:
%
% Image *PreviewImages(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Initialize the per-tile parameters; each tile i applies the selected
    operation with a progressively stronger setting (percentage, radius,
    sigma, etc. are bumped at the bottom of the loop).
  */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        /* The center tile shows the unprocessed thumbnail. */
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        /* Gamma preview also serves as the fallback for unknown types. */
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
          (double) colors);
        break;
      }
      case DespecklePreview:
      {
        /* Despeckle i times by repeatedly replacing the thumbnail. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /*
          NOTE(review): case 4 is missing below (3 jumps to 5), so tile 4
          falls through to default and overwrites thumbnail->magick —
          confirm whether "laplacian" was meant to be case 4.
        */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          NOTE(review): BilevelImage is applied to thumbnail rather than the
          freshly cloned preview_image — confirm which image should be
          thresholded.
        */
        (void) BilevelImage(thumbnail,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
          (double) (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        RectangleInfo
          raise;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        raise.width=(size_t) (2*i+2);
        raise.height=(size_t) (2*i+2);
        raise.x=(i-1)/2;
        raise.y=(i-1)/2;
        (void) RaiseImage(preview_image,&raise,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
          raise.height,(double) raise.x,(double) raise.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        /* NOTE(review): label says "charcoal" for the oil-paint preview —
           confirm whether "paint" was intended. */
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        /*
          Round-trip the tile through a temporary JPEG file to preview the
          effect of the chosen quality setting.
        */
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            /*
              NOTE(review): the bytes branch measures thumbnail while the
              kb/mb branches measure preview_image — confirm intent.
            */
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    /* Strengthen the parameters for the next tile. */
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    preview_image->alpha_trait=UndefinedPixelTrait;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o angle: the angle of the radial blur, in degrees.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;

  size_t
    n;

  ssize_t
    w,
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it succeeds. */
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Precompute a table of n rotation angles centered on zero; each pixel is
    averaged over samples rotated about the image center.
  */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /* NOTE(review): 2UL is promoted inside a double expression here — the
     unsigned suffix has no effect; presumably 2.0 was meant. */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  offset=theta*(double) (n-1)/2.0;
  for (w=0; w < (ssize_t) n; w++)
  {
    cos_theta[w]=cos((double) (theta*w-offset));
    sin_theta[w]=sin((double) (theta*w-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* A failure on any row cancels the remaining rows. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      ssize_t
        i;

      size_t
        step;

      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /*
        Sample the angle table more sparsely near the center (small radius)
        where fewer taps are needed; clamp step to [1, n-1].
      */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const Quantum
          *magick_restrict r;

        ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /* Channel is copy-only: transfer the source sample unchanged. */
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) ||
            (channel == AlphaPixelChannel))
          {
            /*
              No alpha channel (or this is the alpha channel itself): take an
              unweighted mean of the rotated samples.
            */
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha-weighted mean: weight each rotated sample by its alpha and
          renormalize by the accumulated coverage (gamma).
        */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  size_t
    width;

  ssize_t
    center,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a width x width 2-D Gaussian kernel (count=width elements of
    width*sizeof each, i.e. width*width weights total).
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  {
    ssize_t
      i,
      j,
      v;

    j=(ssize_t) (width-1)/2;
    i=0;
    for (v=(-j); v <= j; v++)
    {
      ssize_t
        u;

      for (u=(-j); u <= j; u++)
        kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
    }
  }
  if (image->debug != MagickFalse)
    {
      /*
        Dump the kernel to the transform log for debugging.
      */
      char
        format[MagickPathExtent],
        *message;

      const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    The contrast test is driven from a grayscale copy of the source.
  */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /*
    center is the sample offset (in Quantum units) of the kernel's center
    pixel within the padded (columns+width) x width neighborhood row.
  */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict l,
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* A failure on any row cancels the remaining rows. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const MagickRealType
          *magick_restrict k;

        const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /* Channel is copy-only: transfer the source sample unchanged. */
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Non-blended channel: only neighbors whose luminance differs
              from the center by less than threshold contribute.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* No neighbor passed the contrast test: keep the source. */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha-blended channel: same contrast gate, with alpha-weighted
          accumulation.  NOTE(review): this branch measures contrast from
          the source image (GetPixelIntensity(image,pixels)) while the
          branch above uses luminance_image — confirm the asymmetry is
          intended.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
const double azimuth,const double elevation,ExceptionInfo *exception)
{
/* Clamped pixel intensity used when estimating the surface normal. */
#define GetShadeIntensity(image,pixel) \
ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag "Shade/Image"
CacheView
*image_view,
*shade_view;
Image
*linear_image,
*shade_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
light;
ssize_t
y;
/*
Initialize shaded image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
linear_image=CloneImage(image,0,0,MagickTrue,exception);
shade_image=CloneImage(image,0,0,MagickTrue,exception);
if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
{
/* Destroy whichever clone succeeded before bailing out. */
if (linear_image != (Image *) NULL)
linear_image=DestroyImage(linear_image);
if (shade_image != (Image *) NULL)
shade_image=DestroyImage(shade_image);
return((Image *) NULL);
}
if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
{
linear_image=DestroyImage(linear_image);
shade_image=DestroyImage(shade_image);
return((Image *) NULL);
}
/*
Compute the light vector from the azimuth/elevation angles (degrees),
scaled by QuantumRange so it is commensurate with pixel intensities.
*/
light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
cos(DegreesToRadians(elevation));
light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
cos(DegreesToRadians(elevation));
light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
/*
Shade image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(linear_image,exception);
shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
for (y=0; y < (ssize_t) linear_image->rows; y++)
{
double
distance,
normal_distance,
shade;
PrimaryInfo
normal;
const Quantum
*magick_restrict center,
*magick_restrict p,
*magick_restrict post,
*magick_restrict pre;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
/* Fetch a 3-row window (previous/current/next row, padded by one
column on each side) so the 3x3 normal estimate never reads out of
bounds. */
p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
exception);
q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/*
Shade this row of pixels.
*/
normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */
for (x=0; x < (ssize_t) linear_image->columns; x++)
{
ssize_t
i;
/*
Determine the surface normal and compute shading.
pre/center/post point at the same column in the previous, current,
and next rows of the 3-row window fetched above.
*/
pre=p+GetPixelChannels(linear_image);
center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
/* normal.x: intensity difference between the left and right columns
of the 3x3 neighborhood. */
normal.x=(double) (
GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
/* normal.y: intensity difference between the bottom and top rows. */
normal.y=(double) (
GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
GetShadeIntensity(linear_image,post)+
GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,pre)-
GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
if ((fabs(normal.x) <= MagickEpsilon) &&
(fabs(normal.y) <= MagickEpsilon))
shade=light.z; /* flat surface: lit by the vertical component only */
else
{
shade=0.0;
distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
if (distance > MagickEpsilon)
{
/* shade = (normal . light)/|normal|, i.e. Lambertian shading with
an unnormalized light vector. */
normal_distance=normal.x*normal.x+normal.y*normal.y+
normal.z*normal.z;
if (normal_distance > (MagickEpsilon*MagickEpsilon))
shade=distance/sqrt((double) normal_distance);
}
}
for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
{
PixelChannel
channel;
PixelTrait
shade_traits,
traits;
channel=GetPixelChannelChannel(linear_image,i);
traits=GetPixelChannelTraits(linear_image,channel);
shade_traits=GetPixelChannelTraits(shade_image,channel);
if ((traits == UndefinedPixelTrait) ||
(shade_traits == UndefinedPixelTrait))
continue;
if ((shade_traits & CopyPixelTrait) != 0)
{
/* Channel is copy-only (e.g. unmodified alpha): pass it through. */
SetPixelChannel(shade_image,channel,center[i],q);
continue;
}
if ((traits & UpdatePixelTrait) == 0)
{
SetPixelChannel(shade_image,channel,center[i],q);
continue;
}
if (gray != MagickFalse)
{
/* gray mode: write the raw shade value for every channel. */
SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
continue;
}
/* Otherwise modulate the source channel by the shade factor. */
SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
center[i]),q);
}
p+=GetPixelChannels(linear_image);
q+=GetPixelChannels(shade_image);
}
if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
shade_view=DestroyCacheView(shade_view);
image_view=DestroyCacheView(image_view);
linear_image=DestroyImage(linear_image);
if (status == MagickFalse)
shade_image=DestroyImage(shade_image);
return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
double
gamma,
normalize;
Image
*sharp_image;
KernelInfo
*kernel_info;
ssize_t
i;
size_t
width;
ssize_t
j,
u,
v;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Build a width x width 2D sharpening kernel: a negative Gaussian with a
strong positive center weight, then convolve with it. */
width=GetOptimalKernelWidth2D(radius,sigma);
kernel_info=AcquireKernelInfo((const char *) NULL,exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(kernel_info,0,sizeof(*kernel_info));
kernel_info->width=width;
kernel_info->height=width;
kernel_info->x=(ssize_t) (width-1)/2;
kernel_info->y=(ssize_t) (width-1)/2;
kernel_info->signature=MagickCoreSignature;
kernel_info->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel_info->width,kernel_info->height*
sizeof(*kernel_info->values)));
if (kernel_info->values == (MagickRealType *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/* Fill the kernel with negated Gaussian weights and accumulate their sum. */
normalize=0.0;
j=(ssize_t) (kernel_info->width-1)/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel_info->values[i];
i++;
}
}
/* After the loop i == width*height (odd), so i/2 indexes the central
element; overwrite it so the kernel sums to a net positive response. */
kernel_info->values[i/2]=(double) ((-2.0)*normalize);
/* Re-normalize so all weights sum to 1 (PerceptibleReciprocal guards
against division by a vanishing sum). */
normalize=0.0;
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
normalize+=kernel_info->values[i];
gamma=PerceptibleReciprocal(normalize);
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
kernel_info->values[i]*=gamma;
sharp_image=ConvolveImage(image,kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a square area defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,
% const PixelInterpolateMethod method,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: interpolation method.
%
% o radius: choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
const PixelInterpolateMethod method,const double radius,
ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"
CacheView
*image_view,
*spread_view;
Image
*spread_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
size_t
width;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize spread image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
spread_image=CloneImage(image,0,0,MagickTrue,exception);
if (spread_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
{
spread_image=DestroyImage(spread_image);
return((Image *) NULL);
}
/*
Spread image: each destination pixel is interpolated from a randomly
displaced source location within a neighborhood of extent `width'.
*/
status=MagickTrue;
progress=0;
width=GetOptimalKernelWidth1D(radius,0.5);
/* One RandomInfo per thread so pseudo-random streams do not interleave. */
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
/* NOTE(review): the `key == ~0UL' argument appears to gate multithreading
on the random secret key (reproducible output) -- confirm against the
magick_number_threads() definition. */
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PointInfo
point;
/* Random displacement in [-width/2, +width/2) around (x, y). */
point.x=GetPseudoRandomValue(random_info[id]);
point.y=GetPseudoRandomValue(random_info[id]);
status=InterpolatePixelChannels(image,image_view,spread_image,method,
(double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
exception);
if (status == MagickFalse)
break;
q+=GetPixelChannels(spread_image);
}
if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
spread_view=DestroyCacheView(spread_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
spread_image=DestroyImage(spread_image);
return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double gain,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
const double sigma,const double gain,const double threshold,
ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"
CacheView
*image_view,
*unsharp_view;
Image
*unsharp_image;
MagickBooleanType
status;
MagickOffsetType
progress;
double
quantum_threshold;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
/* This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
exception);
if (unsharp_image != (Image *) NULL)
return(unsharp_image);
#endif
*/
/* Classic unsharp mask: blur a copy, then add back a gain-scaled fraction
of the (original - blurred) difference wherever it exceeds the
threshold. */
unsharp_image=BlurImage(image,radius,sigma,exception);
if (unsharp_image == (Image *) NULL)
return((Image *) NULL);
/* threshold is a [0,1] fraction; scale it to quantum units. */
quantum_threshold=(double) QuantumRange*threshold;
/*
Unsharp-mask image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,unsharp_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits,
unsharp_traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
if ((traits == UndefinedPixelTrait) ||
(unsharp_traits == UndefinedPixelTrait))
continue;
if ((unsharp_traits & CopyPixelTrait) != 0)
{
/* Copy-only channel: take the source value unchanged. */
SetPixelChannel(unsharp_image,channel,p[i],q);
continue;
}
/* pixel = original - blurred (the high-frequency difference). */
pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
if (fabs(2.0*pixel) < quantum_threshold)
pixel=(double) p[i]; /* below threshold: keep original */
else
pixel=(double) p[i]+gain*pixel; /* sharpen by gain-scaled difference */
SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(unsharp_image);
}
if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
unsharp_image->type=image->type;
unsharp_view=DestroyCacheView(unsharp_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
unsharp_image=DestroyImage(unsharp_image);
return(unsharp_image);
}
|
main.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define N 1024
int a[N][N], b[N][N];
int seq[N][N];
int reduce[N][N];
int transpose_reduce_shared[N][N];
/* Return the elapsed time between two timespecs, in milliseconds. */
double cal_time(struct timespec *t_end, struct timespec *t_start)
{
    double ms = 1000.0 * (double) (t_end->tv_sec - t_start->tv_sec);
    ms += (t_end->tv_nsec - t_start->tv_nsec) / 1.0e6;
    return ms;
}
/*
 * Run a sequential N x N integer matrix multiply as the reference, then two
 * OpenMP-parallel variants (register accumulation, and transpose-of-b for
 * unit-stride access), timing each and verifying the parallel results.
 *
 * Returns 1 when every parallel result matches the sequential reference,
 * 0 on mismatch.
 *
 * Bug fixed: the original verification used `break` inside the inner loop,
 * which only exits that inner loop; the outer loop then reset j, so the
 * final `i == N && j == N` check was true even after a mismatch and the
 * test could never report failure.  It also fell off the end of a non-void
 * function on the success path.
 */
int test()
{
    struct timespec t_start, t_end;
    int i, j, k;

    // Generate data
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
        {
            a[i][j] = rand() % N;
            b[i][j] = rand() % N;
        }

    // Sequential reference
    clock_gettime(CLOCK_REALTIME, &t_start);
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
        {
            seq[i][j] = 0;
            for (k = 0; k < N; k++)
                seq[i][j] += a[i][k] * b[k][j];
        }
    clock_gettime(CLOCK_REALTIME, &t_end);
    double final_seq = cal_time(&t_end, &t_start);
    printf("Sequential time: %lf ms\n", final_seq);

    // Reduce access times + Parallel: accumulate in a local so the shared
    // array is written once per element.
    clock_gettime(CLOCK_REALTIME, &t_start);
#pragma omp parallel for collapse(2) shared(reduce, a, b) schedule(dynamic, 16)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            int sum = 0;
            for (int k = 0; k < N; k++)
            {
                sum += a[i][k] * b[k][j];
            }
            reduce[i][j] = sum;
        }
    }
    clock_gettime(CLOCK_REALTIME, &t_end);
    double final_reduce = cal_time(&t_end, &t_start);
    printf("Parallel reduce time: %lf ms\n", final_reduce);

    // Transpose + Reduce + parallel: transpose b in place so the innermost
    // loop reads both operands with unit stride.
    clock_gettime(CLOCK_REALTIME, &t_start);
    for (int i = 0; i < N; i++)
    {
        for (int j = i + 1; j < N; j++)
        {
            int temp = b[i][j];
            b[i][j] = b[j][i];
            b[j][i] = temp;
        }
    }
#pragma omp parallel for shared(transpose_reduce_shared, a, b) collapse(2) schedule(dynamic, 16)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            int sum = 0;
            for (int k = 0; k < N; k++)
            {
                sum += a[i][k] * b[j][k];
            }
            transpose_reduce_shared[i][j] = sum;
        }
    }
    clock_gettime(CLOCK_REALTIME, &t_end);
    double best = cal_time(&t_end, &t_start);
    printf("Parallel transpose_reduce_shared time: %lf ms\n", best);

    // Evaluation: a flag propagates a mismatch out of BOTH loops.
    int ok = 1;
    for (i = 0; i < N && ok; i++)
        for (j = 0; j < N; j++)
            if (seq[i][j] != reduce[i][j] ||
                seq[i][j] != transpose_reduce_shared[i][j])
            {
                ok = 0;
                break;
            }
    if (ok)
    {
        printf("Test pass!!!\n");
        return 1;
    }
    printf("Test failure..\n");
    return 0;
}
/* Entry point: run the benchmark/verification suite once. */
int main()
{
    (void) test();
    return 0;
}
|
GB_unop__identity_uint32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_int32)
// op(A') function: GB (_unop_tran__identity_uint32_int32)
// C type: uint32_t
// A type: int32_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator, casting int32_t entries
// to uint32_t.  The bitmap case skips entries whose Ab bit is clear.
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint32_int32)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            int32_t aij = Ax [k] ;
            uint32_t z = (uint32_t) aij ;
            Cx [k] = z ;
        }
    }
    else
    {
        // every entry of Ax is present: cast and copy them all
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            int32_t aij = Ax [k] ;
            uint32_t z = (uint32_t) aij ;
            Cx [k] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, casting int32_t entries to uint32_t.
   The entire loop body lives in the shared template GB_unop_transpose.c,
   which is specialized by the GB_* macros defined earlier in this file. */
GrB_Info GB (_unop_tran__identity_uint32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for two timevals, normalizing so that
 * result->tv_usec is non-negative.  NOTE: *y is modified in place as part
 * of the carry normalization (same as the classic glibc example).
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Carry from seconds into y's microseconds so x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Borrow in the other direction when the usec gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* With y normalized, tv_usec of the result is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the time-tiled (PLUTO/CLooG-generated) order-4, 3D 25-point
 * stencil: allocates a double-buffered grid A[2][Nz][Ny][Nx] plus a
 * coefficient grid roc2, runs TESTS timed sweeps of the generated tiled
 * loop nest, reports the best time, and frees everything.
 */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* NOTE(review): Nx/Ny/Nz/Nt are used uninitialized when fewer than
4 command-line arguments are given -- callers must pass nx ny nz nt.
The +8 pads 4 ghost cells on each side for the order-4 stencil. */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* Pointer-based 4D/3D arrays: A[t%2] double-buffers the time steps. */
double ****A = (double ****) malloc(sizeof(double***)*2);
/* NOTE(review): this first roc2 allocation leaks -- it is immediately
overwritten by the sizeof(double**)*Nz allocation below. */
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
/* Fixed seed for reproducible runs; index 0 is left uninitialized
(the stencil only reads interior points at distance >= 4). */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* 25-point stencil weights: center plus 4 shells at distance 1..4 along
each axis. */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* (glibc license/feature-test header comments inlined here by the
source-to-source code generator have been condensed.) */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Generated diamond/time-tiled loop nest: t1 iterates over time tiles,
t2..t4 over space tiles (parallelized across t2), t5 is the time step,
and t6..t8 are the intra-tile z/y/x point loops.  Do not hand-edit:
the bounds encode the tiling legality constraints. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(8*t1+Ny+7,32)),floord(16*t2+Ny+3,32)),floord(16*t1-16*t2+Nz+Ny+5,32));t3++) {
for (t4=max(max(max(0,ceild(t1-31,32)),ceild(16*t2-Nz-243,256)),ceild(32*t3-Ny-243,256));t4<=min(min(min(min(floord(4*Nt+Nx-9,256),floord(8*t1+Nx+7,256)),floord(16*t2+Nx+3,256)),floord(32*t3+Nx+19,256)),floord(16*t1-16*t2+Nz+Nx+5,256));t4++) {
for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),8*t3+6),64*t4+62);t5++) {
for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
lbv=max(256*t4,4*t5+4);
ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
DRB028-privatemissing-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it andor
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http:www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https:github.comLLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
tmp should be annotated as private to avoid race condition.
Data race pairs: tmp@65:5 vs. tmp@66:12
tmp@65:5 vs. tmp@65:5
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char * argv[])
{
int i;
int tmp;
int len = 100;
int a[100];
int _ret_val_0;
#pragma cetus private(i)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i)
for (i=0; i<len; i ++ )
{
a[i]=i;
}
#pragma cetus private(i, tmp)
#pragma loop name main#1
#pragma cetus parallel
#pragma omp parallel for private(i, tmp)
for (i=0; i<len; i ++ )
{
tmp=(a[i]+i);
a[i]=tmp;
}
printf("a[50]=%d\n", a[50]);
_ret_val_0=0;
return _ret_val_0;
}
|
GB_binop__lxor_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint64)
// A*D function (colscale): GB (_AxD__lxor_uint64)
// D*A function (rowscale): GB (_DxB__lxor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint64)
// C=scalar+B GB (_bind1st__lxor_uint64)
// C=scalar+B' GB (_bind1st_tran__lxor_uint64)
// C=A+scalar GB (_bind2nd__lxor_uint64)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT64 || GxB_NO_LXOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lxor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ft_TBHP.h | /*-------------------------------------------------------
FAST-TrIPs: Flexible Assignment and Simulation Tool for Transit and Intermodal Passengers
Copyright 2014 Alireza Khani and Mark Hickman
Licensed under the Apache License, Version 2.0
-------------------------------------------------------
Code primarily written by Alireza Khani
Under supervision of Mark Hickman
Contact:
Alireza Khani: akhani@utexas.edu or akhani@email.arizona.edu
Mark Hickman: m.hickman1@uq.edu.au
-------------------------------------------------------
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------*/
#include <time.h>
#include <stdlib.h>
using namespace std;
//////////////////////////////////////////////////////////////////////////////////////////////////////////
int forwardTBHP(string _origin, double _PDT, int _timeBuffer, int _mode){
int i, j, numIterations, permanentLabel, tmpTransferStop, tmpAccessType;
int tmpNumAccess, tmpNumTransfers, tmpSeqNum, tmpMaxSeq;
double tmpCurrentLabel, tmpEarliestArrival, tmpOldLabel, tmpNewLabel, tmpNewCost, tmpNonWalkLabel;
double tmpAccessTime, tmpTransferTime, tmpNewDeparture, tmpNewArrival, tmpInVehTime, tmpWaitingTime;
string buf, tmpStr, tmpQueuvalue, tmpCurrentStop, tmpNewStop, tmpAccessibleTrips, tmpTrip, tmpCurrentMode, tmpNewMode;
char chr[99];
vector<string> tokens;
list<taz*>::iterator tmpTazListIter;
taz* tmpTazPntr;
list<stop*>::iterator tmpStopListIter;
stop* tmpStopPntr;
list<trip*>::iterator tmpTripListIter;
trip* tmpTripPntr;
priority_queue<string> stopQueue;
//Initialization
for(tmpTazListIter=tazList.begin();tmpTazListIter!=tazList.end();tmpTazListIter++){
tmpTazPntr = NULL;
tmpTazPntr = *tmpTazListIter;
tmpTazPntr->resetTazStrategy();
}
for(tmpStopListIter=stopList.begin();tmpStopListIter!=stopList.end();tmpStopListIter++){
tmpStopPntr = NULL;
tmpStopPntr = *tmpStopListIter;
tmpStopPntr->resetStopStrategy();
}
for(tmpTripListIter=tripList.begin();tmpTripListIter!=tripList.end();tmpTripListIter++){
tmpTripPntr = NULL;
tmpTripPntr = *tmpTripListIter;
tmpTripPntr->resetTripUsedBefore(0);
}
stopQueue.empty();
//Access from Origin TAZ
tmpTazPntr = NULL;
tmpTazPntr = tazSet[_origin];
tmpTazPntr->forwardStrategyUpdate(1, _PDT, "Start", 1);
tmpNumAccess = tmpTazPntr->getNumStops();
for(i=0;i<tmpNumAccess;i++){
tmpAccessType = tmpTazPntr->getAccessType(i);
if(_mode==3 && tmpAccessType==2) continue;
if(_mode==2 && tmpAccessType==1) continue;
tmpNewStop = tmpTazPntr->getStop(i);
tmpAccessTime = tmpTazPntr->getAccessTime(i);
tmpNewCost = 1 + originWalkEqv * tmpAccessTime;
tmpNewLabel = tmpNewCost;
tmpNewArrival = _PDT + tmpAccessTime;
stopSet[tmpNewStop]->forwardStrategyUpdate(tmpNewLabel, tmpNewArrival, "Access", _origin, tmpNewCost, -101);
sprintf(chr,"%d",int(999999-tmpNewLabel*1000));
tmpQueuvalue = string(chr);
tmpStr.resize(6-tmpQueuvalue.length(),'0');
tmpQueuvalue = tmpStr + tmpQueuvalue + tmpNewStop;
stopQueue.push(tmpQueuvalue);
}
//Labeling loop
numIterations = 0;
while(stopQueue.size()>0){
tmpStr = stopQueue.top();
stopQueue.pop();
tmpCurrentStop = tmpStr.substr(6,99);
tmpStopPntr = NULL;
tmpStopPntr = stopSet[tmpCurrentStop];
tmpCurrentLabel = tmpStopPntr->getStrategyLabel();
tmpNonWalkLabel = tmpStopPntr->getForwardNonWalkLabel();
tmpEarliestArrival = tmpStopPntr->getStrategyEarliestArrival();
tmpCurrentMode = tmpStopPntr->getArrivalTripId(0);
permanentLabel = tmpStopPntr->getStrategyPermanentLabel();
tmpTransferStop = tmpStopPntr->getTransferStop();
if(permanentLabel==1 || tmpTransferStop==0){
continue;
}else{
tmpStopPntr->setStrategyPermanentLabel();
}
//Update by Transfers
if(tmpCurrentMode!="Access"){
tmpNumTransfers = tmpStopPntr->getNumTransfers();
for(i=0;i<tmpNumTransfers;i++){
tmpNewStop = tmpStopPntr->getTransfer(i);
tmpTransferTime = tmpStopPntr->getTransferTime(i);
tmpNewCost = tmpNonWalkLabel + transferWalkEqv * tmpTransferTime;
tmpOldLabel = stopSet[tmpNewStop]->getStrategyLabel();
if(tmpOldLabel == 999999){
tmpNewLabel = tmpNewCost;
}else{
tmpNewLabel = exp(-1.0*theta*tmpOldLabel)+exp(-1.0*theta*tmpNewCost);
tmpNewLabel = max(0.01,-1.0/theta*log(tmpNewLabel));
}
if(tmpNewLabel < 0){
cout <<"Error - Negative Label - 1"<<endl;
}
if(tmpNewLabel < 999 && tmpNewLabel > 0){
tmpNewArrival = tmpEarliestArrival + tmpTransferTime;
stopSet[tmpNewStop]->forwardStrategyUpdate(tmpNewLabel, tmpNewArrival, "Transfer", tmpCurrentStop, tmpNewCost, -101);
sprintf(chr,"%d",int(999999-tmpNewLabel*1000));
tmpQueuvalue = string(chr);
tmpStr.resize(6-tmpQueuvalue.length(),'0');
tmpQueuvalue = tmpStr + tmpQueuvalue + tmpNewStop;
stopQueue.push(tmpQueuvalue);
}
}
}
//Update by Trips
tmpAccessibleTrips = tmpStopPntr->getForwardAccessibleTrips(tmpEarliestArrival, _timeBuffer);
buf.clear();
tokens.clear();
stringstream ss(tmpAccessibleTrips);
while (ss >> buf){
tokens.push_back(buf);
}
for(i=0;i<tokens.size();i=i+2){
tmpTrip = tokens[i];
tmpSeqNum = atoi(tokens[i+1].c_str());
tmpTripPntr = tripSet[tmpTrip];
if (tmpTripPntr->getTripUsedBefore(0)==1){
continue;
}
tmpMaxSeq = tmpTripPntr->getMaxSequence();
for(j=tmpSeqNum+1;j<=tmpMaxSeq;j++){
//for(j=max(1,tmpSeqNum-1);j<tmpSeqNum;j++){ //LB
tmpNewStop = tmpTripPntr->getStop(j);
tmpNewMode = stopSet[tmpNewStop]->getArrivalTripId(0);
if(tmpNewMode=="Access"){
continue;
}
tmpNewArrival = tmpTripPntr->getSchArrival(j);
tmpNewDeparture = tmpTripPntr->getSchDeparture(tmpSeqNum);
tmpInVehTime = tmpNewArrival - tmpNewDeparture;
tmpWaitingTime = tmpNewDeparture - tmpEarliestArrival;
if(tmpCurrentMode=="Transfer" || tmpCurrentMode.substr(0,1)=="t"){
tmpNewCost = tmpCurrentLabel + tmpInVehTime + waitingEqv * tmpWaitingTime + 60.0*fare/VOT + transferPenalty;
}else{
tmpNewCost = tmpCurrentLabel + tmpInVehTime + scheduleDelayEqv * tmpWaitingTime + 60.0*fare/VOT;
}
/*if ((tmpTripPntr->getRouteId()).length()>3 && (tmpTripPntr->getRouteId()).substr(1,1)=="9"){
tmpNewCost = tmpNewCost + (60.0*1.50)/VOT;
}*/
tmpOldLabel = stopSet[tmpNewStop]->getStrategyLabel();
if(tmpOldLabel == 999999){
tmpNewLabel = tmpNewCost;
}else{
tmpNewLabel = exp(-1.0*theta*tmpOldLabel)+exp(-1.0*theta*tmpNewCost);
tmpNewLabel = max(0.01,-1.0/theta*log(tmpNewLabel));
}
if(tmpNewLabel < 0){
cout <<"Error - Negative Label - 2"<<endl;
}
if(tmpNewLabel < 999 && tmpNewLabel > 0){
stopSet[tmpNewStop]->forwardStrategyUpdate(tmpNewLabel, tmpNewArrival, tmpTrip, tmpCurrentStop, tmpNewCost, tmpNewDeparture);
sprintf(chr,"%d",int(999999-tmpNewLabel*1000));
tmpQueuvalue = string(chr);
tmpStr.resize(6-tmpQueuvalue.length(),'0');
tmpQueuvalue = tmpStr + tmpQueuvalue + tmpNewStop;
stopQueue.push(tmpQueuvalue);
}
}
tmpTripPntr->setTripUsedBefore(0);
}
numIterations++;
}
//Connect to All Other TAZ Centroid
for(tmpTazListIter=tazList.begin();tmpTazListIter!=tazList.end();tmpTazListIter++){
tmpTazPntr = NULL;
tmpTazPntr = *tmpTazListIter;
tmpNumAccess = tmpTazPntr->getNumStops();
for(i=0;i<tmpNumAccess;i++){
tmpAccessType = tmpTazPntr->getAccessType(i);
if(tmpAccessType==2) continue;
tmpOldLabel = tmpTazPntr->getStrategyLabel();
tmpNewStop = tmpTazPntr->getStop(i);
tmpAccessTime = tmpTazPntr->getAccessTime(i);
tmpNewArrival = stopSet[tmpNewStop]->getStrategyLatestArrival() - tmpAccessTime;
tmpNonWalkLabel = stopSet[tmpNewStop]->getForwardNonWalkLabel();
tmpNewCost = tmpNonWalkLabel + originWalkEqv * tmpAccessTime;
if(tmpOldLabel == 999999){
tmpNewLabel = tmpNewCost;
}else{
tmpNewLabel = exp(-1.0*theta*tmpOldLabel)+exp(-1.0*theta*tmpNewCost);
tmpNewLabel = max(0.01,-1.0/theta*log(tmpNewLabel));
}
if(tmpNewLabel < 0){
cout <<"Error - Negative Label - 3"<<endl;
}
if(tmpNewLabel < 999 && tmpNewLabel > 0){
tmpTazPntr->forwardStrategyUpdate(tmpNewLabel, tmpNewArrival, tmpNewStop, tmpNewCost);
}
}
}
return numIterations;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
int backwardTBHP(string _destination, double _PAT, int _timeBuffer, int _mode){
int i, j, numIterations, permanentLabel, tmpTransferStop, tmpAccessType;
int tmpNumAccess, tmpNumTransfers, tmpSeqNum;
double tmpCurrentLabel, tmpLatestDeparture, tmpEarliestDeparture, tmpOldLabel, tmpNewLabel, tmpNewCost, tmpNonWalkLabel;
double tmpAccessTime, tmpTransferTime, tmpNewDeparture, tmpNewArrival, tmpInVehTime, tmpWaitingTime;
string buf, tmpStr, tmpQueuvalue, tmpCurrentStop, tmpNewStop, tmpAccessibleTrips, tmpTrip, tmpCurrentMode, tmpNewMode;
char chr[99];
vector<string> tokens;
list<taz*>::iterator tmpTazListIter;
taz* tmpTazPntr;
list<stop*>::iterator tmpStopListIter;
stop* tmpStopPntr;
list<trip*>::iterator tmpTripListIter;
trip* tmpTripPntr;
priority_queue<string> stopQueue;
//Initialization
for(tmpTazListIter=tazList.begin();tmpTazListIter!=tazList.end();tmpTazListIter++){
tmpTazPntr = NULL;
tmpTazPntr = *tmpTazListIter;
tmpTazPntr->resetTazStrategy();
}
for(tmpStopListIter=stopList.begin();tmpStopListIter!=stopList.end();tmpStopListIter++){
tmpStopPntr = NULL;
tmpStopPntr = *tmpStopListIter;
tmpStopPntr->resetStopStrategy();
}
for(tmpTripListIter=tripList.begin();tmpTripListIter!=tripList.end();tmpTripListIter++){
tmpTripPntr = NULL;
tmpTripPntr = *tmpTripListIter;
tmpTripPntr->resetTripUsedBefore(0);
}
stopQueue.empty();
//Egress to Destination TAZ
tmpTazPntr = NULL;
tmpTazPntr = tazSet[_destination];
tmpTazPntr->backwardStrategyUpdate(1, _PAT, "End", 1);
tmpNumAccess = tmpTazPntr->getNumStops();
for(i=0;i<tmpNumAccess;i++){
tmpAccessType = tmpTazPntr->getAccessType(i);
if(tmpAccessType==2) continue;
tmpNewStop = tmpTazPntr->getStop(i);
tmpAccessTime = tmpTazPntr->getAccessTime(i);
tmpNewCost = 1 + destinationWalkEqv * tmpAccessTime;
tmpNewLabel = tmpNewCost;
tmpNewDeparture = _PAT - tmpAccessTime;
stopSet[tmpNewStop]->backwardStrategyUpdate(tmpNewLabel, tmpNewDeparture, "Egress", _destination, tmpNewCost, -101);
sprintf(chr,"%d",int(999999-tmpNewLabel*1000));
tmpQueuvalue = string(chr);
tmpStr.resize(6-tmpQueuvalue.length(),'0');
tmpQueuvalue = tmpStr + tmpQueuvalue + tmpNewStop;
stopQueue.push(tmpQueuvalue);
}
//Labeling loop
numIterations = 0;
while(stopQueue.size()>0){
tmpStr = stopQueue.top();
stopQueue.pop();
tmpCurrentStop = tmpStr.substr(6,99);
tmpStopPntr = NULL;
tmpStopPntr = stopSet[tmpCurrentStop];
tmpCurrentLabel = tmpStopPntr->getStrategyLabel();
tmpNonWalkLabel = tmpStopPntr->getBackwardNonWalkLabel();
tmpLatestDeparture = tmpStopPntr->getStrategyLatestDeparture();
tmpEarliestDeparture = tmpStopPntr->getStrategyEarliestDeparture();
tmpCurrentMode = tmpStopPntr->getDepartureTripId(0);
permanentLabel = tmpStopPntr->getStrategyPermanentLabel();
tmpTransferStop = tmpStopPntr->getTransferStop();
if(permanentLabel==1 || tmpTransferStop==0){
continue;
}else{
tmpStopPntr->setStrategyPermanentLabel();
}
//Update by Transfers
if(tmpCurrentMode!="Egress"){
tmpNumTransfers = tmpStopPntr->getNumTransfers();
for(i=0;i<tmpNumTransfers;i++){
tmpNewStop = tmpStopPntr->getTransfer(i);
tmpTransferTime = tmpStopPntr->getTransferTime(i);
tmpNewCost = tmpNonWalkLabel + transferWalkEqv * tmpTransferTime;
tmpOldLabel = stopSet[tmpNewStop]->getStrategyLabel();
if(tmpOldLabel == 999999){
tmpNewLabel = tmpNewCost;
}else{
tmpNewLabel = exp(-1.0*theta*tmpOldLabel)+exp(-1.0*theta*tmpNewCost);
tmpNewLabel = max(0.01,-1.0/theta*log(tmpNewLabel));
}
if(tmpNewLabel < 0){
cout <<"Error - Negative Label - 1"<<endl;
}
if(tmpNewLabel < 999 && tmpNewLabel > 0){
tmpNewDeparture = tmpLatestDeparture - tmpTransferTime;
stopSet[tmpNewStop]->backwardStrategyUpdate(tmpNewLabel, tmpNewDeparture, "Transfer", tmpCurrentStop, tmpNewCost, -101);
sprintf(chr,"%d",int(999999-tmpNewLabel*1000));
tmpQueuvalue = string(chr);
tmpStr.resize(6-tmpQueuvalue.length(),'0');
tmpQueuvalue = tmpStr + tmpQueuvalue + tmpNewStop;
stopQueue.push(tmpQueuvalue);
}
}
}
//Update by Trips
tmpAccessibleTrips = tmpStopPntr->getBackwardAccessibleTrips(tmpLatestDeparture, _timeBuffer);
buf.clear();
tokens.clear();
stringstream ss(tmpAccessibleTrips);
while (ss >> buf){
tokens.push_back(buf);
}
for(i=0;i<tokens.size();i=i+2){
tmpTrip = tokens[i];
tmpSeqNum = atoi(tokens[i+1].c_str());
tmpTripPntr = tripSet[tmpTrip];
if (tmpTripPntr->getTripUsedBefore(0)==1){
continue;
}
for(j=1;j<tmpSeqNum;j++){
//for(j=max(1,tmpSeqNum-1);j<tmpSeqNum;j++){ //LB
tmpNewStop = tmpTripPntr->getStop(j);
tmpNewMode = stopSet[tmpNewStop]->getDepartureTripId(0);
if(tmpNewMode=="Egress"){
continue;
}
tmpNewDeparture = tmpTripPntr->getSchDeparture(j);
tmpNewArrival = tmpTripPntr->getSchArrival(tmpSeqNum);
tmpInVehTime = tmpNewArrival - tmpNewDeparture;
tmpWaitingTime = tmpLatestDeparture - tmpNewArrival;
if(tmpCurrentMode=="Transfer" || tmpCurrentMode.substr(0,1)=="t"){
tmpNewCost = tmpCurrentLabel + tmpInVehTime + waitingEqv * tmpWaitingTime + 60.0*fare/VOT + transferPenalty;
}else{
tmpNewCost = tmpCurrentLabel + tmpInVehTime + scheduleDelayEqv * tmpWaitingTime + 60.0*fare/VOT;
}
/*if ((tmpTripPntr->getRouteId()).length()>3 && (tmpTripPntr->getRouteId()).substr(1,1)=="9"){
tmpNewCost = tmpNewCost + (60.0*1.50)/VOT;
}*/
tmpOldLabel = stopSet[tmpNewStop]->getStrategyLabel();
if(tmpOldLabel == 999999){
tmpNewLabel = tmpNewCost;
}else{
tmpNewLabel = exp(-1.0*theta*tmpOldLabel)+exp(-1.0*theta*tmpNewCost);
tmpNewLabel = max(0.01,-1.0/theta*log(tmpNewLabel));
}
if(tmpNewLabel < 0){
cout <<"Error - Negative Label - 2"<<endl;
}
if(tmpNewLabel < 999 && tmpNewLabel > 0){
stopSet[tmpNewStop]->backwardStrategyUpdate(tmpNewLabel, tmpNewDeparture, tmpTrip, tmpCurrentStop, tmpNewCost, tmpNewArrival);
sprintf(chr,"%d",int(999999-tmpNewLabel*1000));
tmpQueuvalue = string(chr);
tmpStr.resize(6-tmpQueuvalue.length(),'0');
tmpQueuvalue = tmpStr + tmpQueuvalue + tmpNewStop;
stopQueue.push(tmpQueuvalue);
}
}
tmpTripPntr->setTripUsedBefore(0);
}
numIterations++;
}
//Connect to All Other TAZ Centroid
for(tmpTazListIter=tazList.begin();tmpTazListIter!=tazList.end();tmpTazListIter++){
tmpTazPntr = NULL;
tmpTazPntr = *tmpTazListIter;
tmpNumAccess = tmpTazPntr->getNumStops();
for(i=0;i<tmpNumAccess;i++){
tmpAccessType = tmpTazPntr->getAccessType(i);
if(_mode==3 && tmpAccessType==2) continue;
if(_mode==2 && tmpAccessType==1) continue;
tmpOldLabel = tmpTazPntr->getStrategyLabel();
tmpNewStop = tmpTazPntr->getStop(i);
tmpAccessTime = tmpTazPntr->getAccessTime(i);
tmpNewDeparture = stopSet[tmpNewStop]->getStrategyEarliestDeparture() - tmpAccessTime;
tmpNonWalkLabel = stopSet[tmpNewStop]->getBackwardNonWalkLabel();
tmpNewCost = tmpNonWalkLabel + originWalkEqv * tmpAccessTime;
if(tmpOldLabel == 999999){
tmpNewLabel = tmpNewCost;
}else{
tmpNewLabel = exp(-1.0*theta*tmpOldLabel)+exp(-1.0*theta*tmpNewCost);
tmpNewLabel = max(0.01,-1.0/theta*log(tmpNewLabel));
}
if(tmpNewLabel < 0){
cout <<"Error - Negative Label - 3"<<endl;
}
if(tmpNewLabel < 999 && tmpNewLabel > 0){
tmpTazPntr->backwardStrategyUpdate(tmpNewLabel, tmpNewDeparture, tmpNewStop, tmpNewCost);
}
}
}
return numIterations;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Reconstructs one forward (outbound) elementary path from the hyperpath
// rooted at TAZ _destination by walking the assigned alternatives backwards
// from the destination's egress link to the origin's access link.
// Returns a tab-separated record:
//   startTime \t boardingStops \t trips \t alightingStops \t walkingTimes
// or "-101" when no complete path can be realized.
// NOTE(review): _PAT is not used in this function; the destination
// alternative is drawn with a hard-coded time of 1800 -- confirm intent.
string getForwardElementaryPath(string _destination, double _PAT){
int tmpStrLen;
string tmpStr, tmpIn, tmpCurrentStop, tmpNewStop, tmpCurrentTrip, tmpAccessLink, tmpTransferLink, tmpLastTrip, tmpFirstTrip, tmpFirstStop;
double tmpDepartureTime, tmpStartTime, tmpEndTime;
string buf, tmpBoardingStops, tmpAlightingStops, tmpTrips, tmpWalkingTimes, tmpPath;
vector<string> tokens;
char chr[99];
// Draw the assigned egress alternative at the destination TAZ.
tmpIn = tazSet[_destination]->getForwardAssignedAlternative(1800);
if(tmpIn=="-101"){
//cout <<"C1"<<endl;
return "-101";
}
// Tokenize the whitespace-separated "stop time ..." record.
buf.clear();
tokens.clear();
stringstream ss(tmpIn);
while (ss >> buf){
tokens.push_back(buf);
}
tmpCurrentStop = tokens[0];
// Times are stored as integers scaled by 100 (fixed point, 2 decimals).
tmpEndTime = atof(tokens[1].c_str())/100;
tmpAccessLink = _destination + "," + tmpCurrentStop;
// Format the egress walk time as a "<int>.<2 digits>" string by printing
// the value scaled by 100 and re-inserting the decimal point.
sprintf(chr,"%d",int(100*accessTimes[tmpAccessLink]));
tmpIn = string(chr);
tmpStrLen = tmpIn.length();
tmpWalkingTimes = tmpIn.substr(0,max(0,tmpStrLen-2)) + ".";
if(tmpStrLen<2) tmpWalkingTimes = tmpWalkingTimes + "0";
tmpWalkingTimes = tmpWalkingTimes + tmpIn.substr(max(0,tmpStrLen-2),2);
tmpDepartureTime = tmpEndTime - accessTimes[tmpAccessLink];
tmpLastTrip = "Egress";
// Walk the path backwards stop-by-stop until the access link is reached.
while(1){
tmpIn = stopSet[tmpCurrentStop]->getForwardAssignedAlternative(tmpDepartureTime, tmpLastTrip);
if(tmpIn=="-101"){
//cout <<"C2"<<endl;
return "-101";
}
buf.clear();
tokens.clear();
stringstream ss(tmpIn);
while (ss >> buf){
tokens.push_back(buf);
}
tmpNewStop = tokens[0];
tmpCurrentTrip = tokens[1];
// Track the earliest stop/trip reached so far: stop ids start with
// 's', transit trip ids start with 't'.
if(tmpNewStop.substr(0,1)=="s"){
tmpFirstStop = tmpNewStop;
}
if(tmpCurrentTrip.substr(0,1)=="t"){
tmpFirstTrip = tmpCurrentTrip;
}
if(tmpCurrentTrip=="Access"){
// Reached the origin access link: prepend the access walk time,
// derive the trip start time, and assemble the final record.
tmpAccessLink = tmpNewStop + "," + tmpCurrentStop;
sprintf(chr,"%d",int(100*accessTimes[tmpAccessLink]));
tmpIn = string(chr);
tmpStrLen = tmpIn.length();
tmpWalkingTimes = tmpIn.substr(max(0,tmpStrLen-2),2) + "," + tmpWalkingTimes;
if(tmpStrLen<2) tmpWalkingTimes = "0" + tmpWalkingTimes;
tmpWalkingTimes = tmpIn.substr(0,max(0,tmpStrLen-2)) + "." + tmpWalkingTimes;
// Start time = scheduled departure at the first boarding stop
// minus the access walk.
tmpStartTime = tripSet[tmpFirstTrip]->getSchDepartureByStop(tmpFirstStop) - accessTimes[tmpAccessLink];
sprintf(chr,"%d",int(100*tmpStartTime));
tmpStr = string(chr);
tmpStrLen = tmpStr.length();
tmpPath = tmpStr.substr(0,max(0,tmpStrLen-2)) + ".";
if(tmpStrLen<2) tmpPath = tmpPath + "0";
tmpPath = tmpPath + tmpStr.substr(max(0,tmpStrLen-2),2);
tmpPath.append("\t");
tmpPath.append(tmpBoardingStops);
tmpPath.append("\t");
tmpPath.append(tmpTrips);
tmpPath.append("\t");
tmpPath.append(tmpAlightingStops);
tmpPath.append("\t");
tmpPath.append(tmpWalkingTimes);
return tmpPath;
}else if(tmpCurrentTrip=="Transfer"){
// Walk transfer: prepend its walk time and roll the clock back.
tmpTransferLink = tmpCurrentStop + "," + tmpNewStop;
sprintf(chr,"%d",int(100*transferTimes[tmpTransferLink]));
tmpIn = string(chr);
tmpStrLen = tmpIn.length();
tmpWalkingTimes = tmpIn.substr(max(0,tmpStrLen-2),2) + "," + tmpWalkingTimes;
if(tmpStrLen<2) tmpWalkingTimes = "0" + tmpWalkingTimes;
tmpWalkingTimes = tmpIn.substr(0,max(0,tmpStrLen-2)) + "." + tmpWalkingTimes;
tmpDepartureTime = tmpDepartureTime - transferTimes[tmpTransferLink];
tmpLastTrip = tmpCurrentTrip;
}else if(tmpCurrentTrip.substr(0,1)=="t"){
// Transit leg: prepend boarding/alighting stops and the trip id
// (prepended because the path is built destination-to-origin).
if(tmpBoardingStops!="") tmpBoardingStops = "," + tmpBoardingStops;
tmpBoardingStops = tmpNewStop + tmpBoardingStops;
if(tmpAlightingStops!="") tmpAlightingStops = "," + tmpAlightingStops;
tmpAlightingStops = tmpCurrentStop + tmpAlightingStops;
if(tmpTrips!="") tmpTrips = "," + tmpTrips;
tmpTrips = tmpCurrentTrip + tmpTrips;
// Two consecutive transit legs imply a zero-minute walk in between.
if(tmpLastTrip.substr(0,1)=="t") tmpWalkingTimes = "0," + tmpWalkingTimes;
tmpDepartureTime = atof(tokens[3].c_str())/100;
tmpLastTrip = tmpCurrentTrip;
}else{
cout <<"ERROR - TripId: "<<tmpCurrentTrip<<endl;
return "-101";
}
tmpCurrentStop = tmpNewStop;
}
// Unreachable: the loop exits only via return.
//cout <<"C3"<<endl;
return "-101";
}///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Reconstructs one backward (inbound) elementary path from the hyperpath
// rooted at TAZ _origin, walking assigned alternatives forwards in time from
// the origin's access link to the destination's egress link.
// Returns the same tab-separated record layout as getForwardElementaryPath:
//   startTime \t boardingStops \t trips \t alightingStops \t walkingTimes
// or "-101" when no complete path can be realized.
// NOTE(review): _PDT is not used in this function, and tmpDepartureTime is
// assigned but never read -- confirm whether they were meant to be used.
string getBackwardElementaryPath(string _origin, double _PDT){
int i, tmpStrLen;
string tmpStr, tmpIn, tmpCurrentStop, tmpNewStop, tmpCurrentTrip, tmpAccessLink, tmpTransferLink, tmpLastTrip, tmpFirstTrip, tmpFirstStop;
double tmpArrivalTime, tmpStartTime, tmpDepartureTime;
string buf, tmpBoardingStops, tmpAlightingStops, tmpTrips, tmpWalkingTimes, tmpPath;
vector<string> tokens;
char chr[99];
// Draw the assigned access alternative at the origin TAZ (time 0).
tmpIn = tazSet[_origin]->getBackwardAssignedAlternative(0);
if(tmpIn=="-101"){
//cout <<"C1"<<endl;
return "-101";
}
// Tokenize the whitespace-separated "stop time ..." record.
buf.clear();
tokens.clear();
stringstream ss(tmpIn);
while (ss >> buf){
tokens.push_back(buf);
}
tmpCurrentStop = tokens[0];
// Times are stored as integers scaled by 100 (fixed point, 2 decimals).
tmpStartTime = atof(tokens[1].c_str())/100;
// Provisional path start time string "<int>.<2 digits>"; refined after the
// first transit boarding is known (see the i==1 block below).
sprintf(chr,"%d",int(100*tmpStartTime));
tmpStr = string(chr);
tmpStrLen = tmpStr.length();
tmpPath = tmpStr.substr(0,max(0,tmpStrLen-2)) + ".";
if(tmpStrLen<2) tmpPath = tmpPath + "0";
tmpPath = tmpPath + tmpStr.substr(max(0,tmpStrLen-2),2);
tmpAccessLink = _origin + "," + tmpCurrentStop;
// Append the access walk time in the same fixed-point string format.
sprintf(chr,"%d",int(100*accessTimes[tmpAccessLink]));
tmpIn = string(chr);
tmpStrLen = tmpIn.length();
tmpWalkingTimes = tmpWalkingTimes + tmpIn.substr(0,max(0,tmpStrLen-2)) + ".";
if(tmpStrLen<2) tmpWalkingTimes = tmpWalkingTimes + "0";
tmpWalkingTimes = tmpWalkingTimes + tmpIn.substr(max(0,tmpStrLen-2),2);
tmpArrivalTime = tmpStartTime + accessTimes[tmpAccessLink];
tmpLastTrip = "Access";
// Stop ids start with 's'.
if(tmpCurrentStop.substr(0,1)=="s"){
tmpFirstStop = tmpCurrentStop;
}
i = 0;
// Walk the path forwards stop-by-stop until the egress link is reached.
while(1){
tmpIn = stopSet[tmpCurrentStop]->getBackwardAssignedAlternative(tmpArrivalTime, tmpLastTrip);
if(tmpIn=="-101"){
//cout <<"C2"<<endl;
return "-101";
}
i++;
buf.clear();
tokens.clear();
stringstream ss(tmpIn);
while (ss >> buf){
tokens.push_back(buf);
}
tmpNewStop = tokens[0];
tmpCurrentTrip = tokens[1];
tmpDepartureTime = atof(tokens[2].c_str())/100;
// Transit trip ids start with 't'.
if(tmpCurrentTrip.substr(0,1)=="t"){
tmpFirstTrip = tmpCurrentTrip;
}
if(i==1){
// First leg resolved: recompute the true start time from the first
// trip's scheduled departure minus the access walk, and rebuild the
// start-time prefix of the record.
tmpStartTime = tripSet[tmpFirstTrip]->getSchDepartureByStop(tmpFirstStop) - accessTimes[tmpAccessLink];
sprintf(chr,"%d",int(100*tmpStartTime));
tmpStr = string(chr);
tmpStrLen = tmpStr.length();
tmpPath = tmpStr.substr(0,max(0,tmpStrLen-2)) + ".";
if(tmpStrLen<2) tmpPath = tmpPath + "0";
tmpPath = tmpPath + tmpStr.substr(max(0,tmpStrLen-2),2);
}
if(tmpCurrentTrip=="Egress"){
// Reached the destination: append the egress walk time and
// assemble the final record.
tmpAccessLink = tmpNewStop + "," + tmpCurrentStop;
sprintf(chr,"%d",int(100*accessTimes[tmpAccessLink]));
tmpIn = string(chr);
tmpStrLen = tmpIn.length();
tmpWalkingTimes = tmpWalkingTimes + "," + tmpIn.substr(0,max(0,tmpStrLen-2)) + ".";
if(tmpStrLen<2) tmpWalkingTimes = tmpWalkingTimes + "0";
tmpWalkingTimes = tmpWalkingTimes + tmpIn.substr(max(0,tmpStrLen-2),2);
tmpPath.append("\t");
tmpPath.append(tmpBoardingStops);
tmpPath.append("\t");
tmpPath.append(tmpTrips);
tmpPath.append("\t");
tmpPath.append(tmpAlightingStops);
tmpPath.append("\t");
tmpPath.append(tmpWalkingTimes);
return tmpPath;
}else if(tmpCurrentTrip=="Transfer"){
// Walk transfer: append its walk time and advance the clock.
tmpTransferLink = tmpCurrentStop + "," + tmpNewStop;
sprintf(chr,"%d",int(100*transferTimes[tmpTransferLink]));
tmpIn = string(chr);
tmpStrLen = tmpIn.length();
tmpWalkingTimes = tmpWalkingTimes + "," + tmpIn.substr(0,max(0,tmpStrLen-2)) + ".";
if(tmpStrLen<2) tmpWalkingTimes = tmpWalkingTimes + "0";
tmpWalkingTimes = tmpWalkingTimes + tmpIn.substr(max(0,tmpStrLen-2),2);
tmpArrivalTime = tmpArrivalTime + transferTimes[tmpTransferLink];
tmpLastTrip = tmpCurrentTrip;
}else if(tmpCurrentTrip.substr(0,1)=="t"){
// Transit leg: append boarding/alighting stops and the trip id in
// travel order (the path is built origin-to-destination here).
if(tmpBoardingStops!="") tmpBoardingStops.append(",");
tmpBoardingStops.append(tmpCurrentStop);
if(tmpAlightingStops!="") tmpAlightingStops.append(",");
tmpAlightingStops.append(tmpNewStop);
if(tmpTrips!="") tmpTrips.append(",");
tmpTrips.append(tmpCurrentTrip);
// Two consecutive transit legs imply a zero-minute walk in between.
if(tmpLastTrip.substr(0,1)=="t") tmpWalkingTimes.append(",0");
tmpArrivalTime = atof(tokens[3].c_str())/100;
tmpLastTrip = tmpCurrentTrip;
}else{
cout <<"ERROR - TripId: "<<tmpCurrentTrip<<endl;
return "-101";
}
tmpCurrentStop = tmpNewStop;
}
// Unreachable: the loop exits only via return.
//cout <<"C3"<<endl;
return "-101";
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int disaggregateStochasticAssignment(int _iter, int _timeBuff, int _numThreads){
int k, numThreads, tmpNumPassengers, tmpNumPaths;
double startTime, endTime, cpuTime;
numThreads = _numThreads;
parallelizeStops(numThreads);
parallelizeTazs(numThreads);
parallelizeTrips(numThreads);
cout <<"**************************** GENERATING PATHS ****************************"<<endl;
tmpNumPassengers = passengerSet.size();
tmpNumPaths = 0;
startTime = clock()*1.0/CLOCKS_PER_SEC;
omp_set_dynamic(0);
omp_set_num_threads(numThreads);
// cout <<"getNumThreads = "<<omp_get_num_threads()<<endl;
#pragma omp parallel for //num_threads(numThreads)
for(k=0;k<tmpNumPassengers;k++){
int threadId, tmpNumIterations, tmpTourHalf, tmpStatus, m, tmpMode;
string tmpPassengerId, tmpOriginTaz, tmpDestinationTaz, tmpPath;
double tmpPDT, tmpPAT;
passenger* passengerPntr;
map<string,passenger*>::iterator tmpPassengerIter;
threadId = omp_get_thread_num();
tmpPassengerIter = passengerSet.begin();
advance(tmpPassengerIter, k);
if(tmpPassengerIter==passengerSet.end()) continue;
tmpPassengerId = (*tmpPassengerIter).first;
passengerPntr = NULL;
passengerPntr = passengerSet[tmpPassengerId];
tmpOriginTaz =passengerPntr->getOriginTAZ();
tmpDestinationTaz = passengerPntr->getDestinationTAZ();
if(tazSet.find(tmpOriginTaz)==tazSet.end() || tazSet.find(tmpDestinationTaz)==tazSet.end()) continue;
if(tmpOriginTaz==tmpDestinationTaz) continue;
tmpStatus = passengerPntr->getPassengerStatus();
if(_iter>1){
if(tmpStatus==5){
tmpNumPaths++;
continue;
}else{
passengerPntr->setAssignedPath("");
passengerPntr->setPassengerStatus(-1);
}
}
tmpPDT = passengerPntr->getPDT();
tmpPAT = passengerPntr->getPAT();
tmpTourHalf = passengerPntr->getTourHalf();
tmpMode = passengerPntr->getMode();
if(tmpTourHalf==1){
tmpNumIterations = backwardTBHP(tmpDestinationTaz, tmpPAT, _timeBuff, tmpMode);
m = 0;
while(1){
tmpPath = getBackwardElementaryPath(tmpOriginTaz, tmpPDT);
m++;
if(tmpPath!="-101" || m>1000){
break;
}
}
}else if(tmpTourHalf==2){
tmpNumIterations = forwardTBHP(tmpOriginTaz, tmpPDT, _timeBuff, tmpMode);
m = 0;
while(1){
tmpPath = getForwardElementaryPath(tmpDestinationTaz, tmpPAT);
m++;
if(tmpPath!="-101" || m>1000){
break;
}
}
}
#pragma omp critical
if(tmpPath!="-101"){
passengerPntr->setAssignedPath(tmpPath);
tmpNumPaths++;
}
#pragma omp critical
if(k%max(min(tmpNumPassengers/10,1000),10)==0){
endTime = clock()*1.0/CLOCKS_PER_SEC;
cpuTime = round(100 * (endTime - startTime))/100.0;
cout <<k<<" ( "<<tmpNumPaths<<" )\t/\t"<<tmpNumPassengers<<"\tpassengers assigned;\ttime elapsed:\t"<<cpuTime<<"\tseconds"<<endl;
}
}
endTime = clock()*1.0/CLOCKS_PER_SEC;
cpuTime = round(100 * (endTime - startTime))/100.0;
cout <<k<<"\t/\t"<<tmpNumPassengers<<"\tpassengers assigned;\ttime elapsed:\t"<<cpuTime<<"\tseconds"<<endl;
return tmpNumPaths;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Path-based stochastic assignment: for each passenger, generates up to 1000
// elementary paths from the TBHP hyperpath, accumulates them into a choice
// set, and assigns one path. Parallelized over passengers with OpenMP.
//   _iter                - assignment iteration; for _iter > 1 only
//                          status-4/5 passengers are (re)assigned, the rest
//                          are reset and skipped
//   _timeBuff            - time buffer passed through to forward/backward TBHP
//   _printPassengersFlag - when 1, dumps each passenger's choice set to
//                          ft_output_choiceSet.dat
//   _numThreads          - OpenMP thread count
// Returns the number of passengers that received a path.
int pathBasedStochasticAssignment(int _iter, int _timeBuff, int _printPassengersFlag, int _numThreads){
    int k, numThreads, tmpNumPassengers, tmpNumPaths;
    double startTime, endTime, cpuTime;
    numThreads = _numThreads;
    parallelizeStops(numThreads);
    parallelizeTazs(numThreads);
    parallelizeTrips(numThreads);
    ofstream logFile;
    logFile.open("ft_log.txt");
    logFile <<"Started assignment at: "<<getTime()<<endl;
    ofstream outFile;
    if(_printPassengersFlag==1){
        if(_iter==1){
            outFile.open("ft_output_choiceSet.dat");
            //outFile <<"passengerId\torigin\tdestination\tnumOfOccur\tpath"<<endl;
        }else{
            outFile.open("ft_output_choiceSet.dat",ofstream::app);
            outFile <<"ITERATION "<<_iter<<endl;
        }
    }
    cout <<"**************************** GENERATING PATHS ****************************"<<endl;
    startTime = clock()*1.0/CLOCKS_PER_SEC;
    tmpNumPassengers = passengerSet.size();
    tmpNumPaths = 0;
    omp_set_dynamic(0);
    omp_set_num_threads(numThreads);
    #pragma omp parallel for
    for(k=0;k<tmpNumPassengers;k++){
        int tmpNumIterations, tmpTourHalf, tmpStatus, n, tmpMode;
        string tmpPassengerId, tmpOriginTaz, tmpDestinationTaz, tmpPath;
        double tmpPDT, tmpPAT;
        passenger* passengerPntr;
        map<string,passenger*>::iterator tmpPassengerIter;
        // BUGFIX: the choice-set map and its iterator used to be single
        // function-scope objects cleared and filled by every thread at once
        // (a data race); they are now private to each loop iteration.
        map<string,int> tmpPathSet;
        map<string,int>::iterator tmpPathSetIter;
        tmpPassengerIter = passengerSet.begin();
        advance(tmpPassengerIter, k);   // O(k) on std::map; slow but thread-safe for reads
        if(tmpPassengerIter==passengerSet.end()) continue;
        tmpPassengerId = (*tmpPassengerIter).first;
        passengerPntr = NULL;
        passengerPntr = passengerSet[tmpPassengerId];
        tmpOriginTaz = passengerPntr->getOriginTAZ();
        tmpDestinationTaz = passengerPntr->getDestinationTAZ();
        // Skip unknown or degenerate O-D pairs.
        if(tazSet.find(tmpOriginTaz)==tazSet.end() || tazSet.find(tmpDestinationTaz)==tazSet.end()) continue;
        if(tmpOriginTaz==tmpDestinationTaz) continue;
        tmpStatus = passengerPntr->getPassengerStatus();
        if(_iter>1){
            if(tmpStatus==5){
                // Keep the previously assigned path.
                // BUGFIX: shared counter updates must be synchronized.
                #pragma omp critical
                tmpNumPaths++;
                continue;
            }else if(tmpStatus==4){
                // Re-draw from the already generated choice set.
                passengerPntr->setAssignedPath("");
                passengerPntr->analyzePaths();
                tmpPath = passengerPntr->assignPath();
                #pragma omp critical
                if(tmpPath!="-101"){
                    passengerPntr->setAssignedPath(tmpPath);
                    tmpNumPaths++;
                }
                continue;
            }else{
                // Reset and skip; path generation happens only in iteration 1.
                passengerPntr->resetPaths();
                passengerPntr->setAssignedPath("");
                passengerPntr->setPassengerStatus(-1);
                continue;
            }
        }
        tmpPDT = passengerPntr->getPDT();
        tmpPAT = passengerPntr->getPAT();
        tmpTourHalf = passengerPntr->getTourHalf();
        tmpMode = passengerPntr->getMode();
        tmpPathSet.clear();
        if (tmpTourHalf==1){
            // Inbound half: backward hyperpath, then sample 1000 paths.
            tmpNumIterations = backwardTBHP(tmpDestinationTaz, tmpPAT, _timeBuff, tmpMode);
            for (n=1;n<=1000;n++){
                tmpPath = getBackwardElementaryPath(tmpOriginTaz, tmpPDT);
                if(tmpPath!="-101"){
                    tmpPathSetIter = tmpPathSet.find(tmpPath);
                    if (tmpPathSetIter==tmpPathSet.end())
                        tmpPathSet[tmpPath] = 1;
                    else{
                        tmpPathSet[tmpPath] = tmpPathSet[tmpPath] + 1;
                    }
                    passengerPntr->addPaths(tmpPath);
                }
            }
        }else{
            // Outbound half: forward hyperpath, then sample 1000 paths.
            tmpNumIterations = forwardTBHP(tmpOriginTaz, tmpPDT, _timeBuff, tmpMode);
            for (n=1;n<=1000;n++){
                tmpPath = getForwardElementaryPath(tmpDestinationTaz, tmpPAT);
                if(tmpPath!="-101"){
                    tmpPathSetIter = tmpPathSet.find(tmpPath);
                    if (tmpPathSetIter==tmpPathSet.end())
                        tmpPathSet[tmpPath] = 1;
                    else{
                        tmpPathSet[tmpPath] = tmpPathSet[tmpPath] + 1;
                    }
                    passengerPntr->addPaths(tmpPath);
                }
            }
        }
        if(_printPassengersFlag==1){
            // BUGFIX: serialize writes to the shared output stream; multiple
            // threads used to interleave on outFile without synchronization.
            #pragma omp critical
            for(tmpPathSetIter=tmpPathSet.begin();tmpPathSetIter!=tmpPathSet.end();tmpPathSetIter++){
                outFile <<tmpPassengerId.substr(1,99)<<"\t"<<tmpOriginTaz.substr(1,99)<<"\t"<<tmpDestinationTaz.substr(1,99)<<"\t"<<(*tmpPathSetIter).second<<"\t"<<(*tmpPathSetIter).first<<endl;
            }
        }
        passengerPntr->analyzePaths();
        tmpPath = passengerPntr->assignPath();
        #pragma omp critical
        if(tmpPath!="-101"){
            passengerPntr->setAssignedPath(tmpPath);
            tmpNumPaths++;
        }
        // Progress report roughly every 10% (bounded between 10 and 1000).
        #pragma omp critical
        if(k%max(min(tmpNumPassengers/10,1000),10)==0){
            endTime = clock()*1.0/CLOCKS_PER_SEC;
            cpuTime = round(100 * (endTime - startTime))/100.0;
            cout <<k<<" ( "<<tmpNumPaths<<" )\t/\t"<<tmpNumPassengers<<"\tpassengers assigned;\ttime elapsed:\t"<<cpuTime<<"\tseconds"<<endl;
            logFile <<k<<" ( "<<tmpNumPaths<<" )\t/\t"<<tmpNumPassengers<<"\tpassengers assigned;\ttime elapsed:\t"<<cpuTime<<"\tseconds"<<endl;
        }
    }
    endTime = clock()*1.0/CLOCKS_PER_SEC;
    cpuTime = round(100 * (endTime - startTime))/100.0;
    // BUGFIX: 'k' is private to the parallel loop and has an unspecified
    // value here; report the total number of passengers instead.
    cout <<tmpNumPassengers<<"\t/\t"<<tmpNumPassengers<<"\tpassengers assigned;\ttime elapsed:\t"<<cpuTime<<"\tseconds"<<endl;
    logFile <<tmpNumPassengers<<"\t/\t"<<tmpNumPassengers<<"\tpassengers assigned;\ttime elapsed:\t"<<cpuTime<<"\tseconds"<<endl;
    if(_printPassengersFlag==1){
        outFile.close();
    }
    logFile <<"Finished assignment at: "<<getTime()<<endl;
    logFile.close();
    return tmpNumPaths;
}
|
SpatialAveragePooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialAveragePooling.c"
#else
#include <THNN/generic/pooling_shape.h>
#include <algorithm>
// Validates arguments and tensor shapes for 2D average pooling.
// Accepts a non-empty 3D (C,H,W) or 4D (N,C,H,W) input; checks kernel and
// stride positivity, pad <= kernel/2, and (when gradOutput is non-NULL, i.e.
// in the backward pass) that gradOutput matches the computed output shape.
// The integer literals passed to THArgCheck/THNN_ARGCHECK are the 1-based
// argument positions reported in the error message.
static inline void THNN_(SpatialAveragePooling_shapeCheck)(
THTensor *input, THTensor *gradOutput,
int kH, int kW, int dH, int dW, int padH, int padW,
bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
// Dimension indices of (feature, height, width); shifted by one for
// batched 4D input.
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THNN_ARGCHECK(!input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size(dimh-1);
int64_t inputHeight = input->size(dimh);
int64_t inputWidth = input->size(dimw);
// Average pooling preserves the number of planes.
int64_t nOutputPlane = nInputPlane;
// Output extent per spatial dim (dilation fixed at 1).
int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
if (outputWidth < 1 || outputHeight < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,inputHeight,inputWidth,nInputPlane,outputHeight,outputWidth);
// Backward pass only: gradOutput must match the forward output shape.
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
}
}
// Forward pass of 2D average pooling over a 3D (C,H,W) or 4D (N,C,H,W)
// input. Each output element is the mean of its kH x kW window; with
// count_include_pad the divisor is the full (possibly padded) window size,
// otherwise only the in-bounds elements are counted.
void THNN_(SpatialAveragePooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
bool ceil_mode,
bool count_include_pad)
{
scalar_t *output_data;
scalar_t *input_data;
// Dimension indices of (channel, height, width); shifted for 4D input.
int dimw = 2;
int dimh = 1;
int dimc = 0;
int64_t nbatch = 1;
int64_t inputWidth;
int64_t inputHeight;
int64_t outputWidth;
int64_t outputHeight;
int64_t nInputPlane; // number of channels (or colors)
int64_t k;
THNN_(SpatialAveragePooling_shapeCheck)
(input, NULL, kH, kW, dH, dW, padH, padW, ceil_mode);
if (input->dim() == 4) {
nbatch = input->size(0);
dimw++;
dimh++;
dimc++;
}
inputWidth = input->size(dimw);
inputHeight = input->size(dimh);
nInputPlane = input->size(dimc);
outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
if (input->dim() == 3)
THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth);
else
THTensor_(resize4d)(output, input->size(0), nInputPlane, outputHeight, outputWidth);
// Raw pointer arithmetic below requires contiguous storage.
input = THTensor_(newContiguous)(input);
THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
// Parallelize across channels; each (batch, channel) plane is independent.
#pragma omp parallel for private(k)
for(k = 0; k < nInputPlane; k++)
{
int64_t p;
for(p = 0; p < nbatch; p++)
{
int64_t xx, yy;
/* For all output pixels... */
scalar_t *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight;
scalar_t *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
int64_t i;
// The accumulation below uses +=, so the plane must start at zero.
for(i = 0; i < outputWidth*outputHeight; i++)
ptr_output[i] = 0;
for(yy = 0; yy < outputHeight; yy++)
{
for(xx = 0; xx < outputWidth; xx++)
{
/* Compute the mean of the input image... */
// Window bounds including padding (used for the pad-inclusive
// divisor), then clipped to the valid input region.
int64_t hstart = yy * dH - padH;
int64_t wstart = xx * dW - padW;
int64_t hend = std::min(hstart + kH, inputHeight + padH);
int64_t wend = std::min(wstart + kW, inputWidth + padW);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = std::max(hstart, (int64_t) 0);
wstart = std::max(wstart, (int64_t) 0);
hend = std::min(hend, inputHeight);
wend = std::min(wend, inputWidth);
scalar_t sum = 0;
int divide_factor;
if(count_include_pad)
divide_factor = pool_size;
else
divide_factor = (hend - hstart) * (wend - wstart);
int64_t kx, ky;
for(ky = hstart; ky < hend; ky++)
{
for(kx = wstart; kx < wend; kx++)
sum += ptr_input[ky*inputWidth + kx];
}
/* Update output */
*ptr_output++ += sum/divide_factor;
}
}
}
}
// newContiguous took a reference (possibly to a fresh copy); release it.
c10::raw::intrusive_ptr::decref(input);
}
// Backward pass of 2D average pooling: each gradOutput element is spread
// uniformly over the input positions of its pooling window, divided by the
// same factor used in the forward pass (pad-inclusive window size when
// count_include_pad, otherwise the clipped window size).
void THNN_(SpatialAveragePooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          int kW,
          int kH,
          int dW,
          int dH,
          int padW,
          int padH,
          bool ceil_mode,
          bool count_include_pad)
{
  // Dimension indices of (channel, height, width); shifted for 4D input.
  int dimw = 2;
  int dimh = 1;
  int dimc = 0;
  int64_t nbatch = 1;
  int64_t ndim = 3;
  int64_t inputWidth;
  int64_t inputHeight;
  int64_t outputWidth;
  int64_t outputHeight;
  int64_t nInputPlane; // number of channels (or colors)
  scalar_t *gradOutput_data;
  scalar_t *gradInput_data;
  int64_t k;

  THNN_(SpatialAveragePooling_shapeCheck)
    (input, gradOutput, kH, kW, dH, dW, padH, padW, ceil_mode);

  if (input->dim() == 4) {
    nbatch = input->size(0);
    dimw++;
    dimh++;
    dimc++;
    ndim = 4;
  }
  inputWidth = input->size(dimw);
  inputHeight = input->size(dimh);
  nInputPlane = input->size(dimc);
  outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
  outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);

  THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
  THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);

  THTensor_(resizeAs)(gradInput, input);

  // Raw pointer arithmetic below requires contiguous storage.
  gradOutput = THTensor_(newContiguous)(gradOutput);
  THArgCheck(THTensor_(isContiguous)(gradInput), 4, "gradInput must be contiguous");

  gradInput_data = gradInput->data<scalar_t>();
  gradOutput_data = gradOutput->data<scalar_t>();

  // Parallelize across channels; each (batch, channel) plane is independent.
#pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane; k++)
  {
    int64_t p;
    for(p = 0; p < nbatch; p++)
    {
      scalar_t *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
      // CLEANUP: the original kept two identical pointers (ptr_gi and
      // ptr_gradInput) to the same plane; one suffices.
      scalar_t *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
      int64_t xx, yy;
      int64_t i;
      // The accumulation below uses +=, so the plane must start at zero.
      for(i = 0; i < inputWidth*inputHeight; i++)
        ptr_gradInput[i] = 0.0;

      for(yy = 0; yy < outputHeight; yy++)
      {
        for(xx = 0; xx < outputWidth; xx++)
        {
          // Window bounds including padding (for the pad-inclusive divisor),
          // then clipped to the valid input region.
          int64_t hstart = yy * dH - padH;
          int64_t wstart = xx * dW - padW;
          int64_t hend = std::min(hstart + kH, inputHeight + padH);
          int64_t wend = std::min(wstart + kW, inputWidth + padW);
          int pool_size = (hend - hstart) * (wend - wstart);
          hstart = std::max(hstart, (int64_t) 0);
          wstart = std::max(wstart, (int64_t) 0);
          hend = std::min(hend, inputHeight);
          wend = std::min(wend, inputWidth);

          scalar_t z = *ptr_gradOutput++;

          int divide_factor;
          if(count_include_pad)
            divide_factor = pool_size;
          else
            divide_factor = (hend - hstart) * (wend - wstart);

          int64_t kx, ky;
          for(ky = hstart ; ky < hend; ky++)
          {
            for(kx = wstart; kx < wend; kx++)
              ptr_gradInput[ky*inputWidth + kx] += z/divide_factor;
          }
        }
      }
    }
  }

  // newContiguous took a reference (possibly to a fresh copy); release it.
  c10::raw::intrusive_ptr::decref(gradOutput);
}
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Y is used as scratch space and may be modified by the call.
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize so that x->tv_usec >= y->tv_usec by borrowing whole
   * seconds into y's microsecond field. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }

  /* Likewise move any excess of more than one second back into tv_sec. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* The microsecond component of the difference is now non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative iff x's (adjusted) seconds fall short of y's. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: order-1, 3D 7-point stencil with variable coefficients,
 * time-tiled by PLUTO/CLooG and parallelized with OpenMP.
 * Usage: prog Nx Ny Nz Nt (interior sizes; a boundary layer is added).
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* BUGFIX: Nx/Ny/Nz/Nt were read uninitialized (undefined behavior) when
   * fewer than four arguments were supplied; require them explicitly. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;   /* +2 accounts for the boundary layer */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  /* Allocate the arrays: A holds two time levels, coef the 7 stencil weights. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* Tile size information, including extra element to decide the list length.
   * The list is modified here before source-to-source transformations. */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 8;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  /* For timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize with reproducible pseudo-random data.
   * BUGFIX: the loops used to start at index 1, leaving plane/row/column 0
   * uninitialized even though the stencil reads it; initialize everything. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    /* Time-tiled stencil sweep (auto-generated CLooG code; do not edit the
     * loop bounds by hand). Addition: 6 && Multiplication: 2 per point. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,16);t1++) {
    lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
    ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) {
        for (t4=max(max(max(0,ceild(t1-63,64)),ceild(32*t2-Nz-1020,1024)),ceild(8*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(16*t1+Nx+29,1024)),floord(32*t2+Nx+28,1024)),floord(8*t3+Nx+4,1024)),floord(32*t1-32*t2+Nz+Nx+27,1024));t4++) {
          for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),1024*t4+1022),32*t1-32*t2+Nz+29);t5++) {
            for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays.
   * BUGFIX: the top-level A/coef pointers and the tile_size list were
   * previously leaked. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
single.c |
// OpenMP Single Example
// Inclusions
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// Main
// Demonstrates the OpenMP `single` construct: the work-shared loop is split
// among the team, while the single block runs on exactly one thread.
int main( int argc, char** argv ) {
  int iter;        // loop iterator (private per thread)
  int count = 0;   // number of iterations (shared)

  // Parallel region
  #pragma omp parallel \
    shared( count ) \
    private( iter )
  {
    // Work-shared loop: iterations are divided among the team
    // (count is 0, so the body never executes in this demo).
    #pragma omp for
    for( iter = 0; iter < count; iter++ ) {
      printf( "Thread %d of %d - Iteration %d\n",
              omp_get_thread_num( ),
              omp_get_max_threads( ), iter );
    }

    // Single section - executed by exactly one thread of the team.
    #pragma omp single
    {
      printf( "Thread %d of %d - Running Single Construct\n",
              omp_get_thread_num( ),
              omp_get_max_threads( ) );
    }
  }
  return 0;
}
// End single.c - EWG SDG
|
parallel.h | #ifndef PARALLEL_H_
#define PARALLEL_H_
#ifndef SERIAL
#include <boost/mpi.hpp>
#endif
#include "omp.h"
#include "std.h"
#ifndef SERIAL
// Element-wise addition functor for equal-length vectors, usable as the
// reduction operation in boost::mpi::all_reduce.
//
// Fix: no longer inherits from std::binary_function, which was deprecated in
// C++11 and removed in C++17, so this header failed to compile under newer
// standards.  The typedefs it used to provide are declared explicitly to stay
// backward compatible with any code that referenced them.
template <class T>
class vector_plus {
   public:
    typedef std::vector<T> first_argument_type;
    typedef std::vector<T> second_argument_type;
    typedef std::vector<T> result_type;
    // Returns a new vector with v[i] = lhs[i] + rhs[i].
    // Assumes rhs has at least lhs.size() elements — TODO confirm callers.
    std::vector<T> operator()(const std::vector<T>& lhs, const std::vector<T>& rhs) const {
        std::vector<T> v(lhs.size());
        std::transform(lhs.begin(), lhs.end(), rhs.begin(), v.begin(), std::plus<T>());
        return (v);
    }
};
// Process-level parallel context: a lazily-constructed singleton wrapping a
// boost::mpi communicator, with static accessors for rank/size queries,
// barriers, and all-reduce sums.  OpenMP is used only in print_info().
class Parallel {
   private:
    size_t id;  // MPI rank of this process, cached at construction.
    size_t n;   // Total number of MPI processes, cached at construction.
    boost::mpi::environment* env; // For MPI 1.1.
    boost::mpi::communicator world;
    Parallel() {
        id = world.rank();
        n = world.size();
    }
    // Singleton pattern boilerplate.
    static Parallel& get_instance() {
        static Parallel instance;
        return instance;
    }
   public:
    // Stores a pointer to the caller-owned MPI environment.
    // NOTE(review): env is only set when init() is called; get_host()
    // dereferences it unconditionally — confirm callers always init() first.
    static void init(boost::mpi::environment& env) { Parallel::get_instance().env = &env; }
    static size_t get_id() { return Parallel::get_instance().id; }
    static size_t get_n() { return Parallel::get_instance().n; }
    static bool is_master() { return Parallel::get_instance().id == 0; }
    static std::string get_host() { return Parallel::get_instance().env->processor_name(); }
    // Flush stdout before the MPI barrier so buffered output from ranks is
    // emitted in a bounded window around the synchronization point.
    static void barrier() {
        fflush(stdout);
        Parallel::get_instance().world.barrier();
    }
    // Every rank prints its id, OpenMP thread count, and host name,
    // bracketed by barriers to keep the report roughly grouped.
    static void print_info() {
        Parallel::barrier();
#pragma omp parallel
        {
            if (omp_get_thread_num() == 0) {
                printf(
                    "Proc %zu (%d threads) running on %s\n",
                    Parallel::get_id(),
                    omp_get_num_threads(),
                    Parallel::get_host().c_str());
            }
        }
        Parallel::barrier();
    }
    // In-place all-reduce: on return, t holds the sum of t over all ranks.
    template <class T>
    static void reduce_to_sum(T& t) {
        T t_local = t;
        boost::mpi::all_reduce(Parallel::get_instance().world, t_local, t, std::plus<T>());
    }
    // Element-wise all-reduce for vectors.
    // Assumes the vector has the same length on every rank — TODO confirm.
    template <class T>
    static void reduce_to_sum_vector(std::vector<T>& t) {
        std::vector<T> t_local = t;
        boost::mpi::all_reduce(Parallel::get_instance().world, t_local, t, vector_plus<T>());
    }
};
#else
// Non-MPI stub for debugging and profiling.
// Non-MPI stub for debugging and profiling: a single-process drop-in for the
// MPI-backed Parallel class, so caller code compiles unchanged with -DSERIAL.
// Fix: added reduce_to_sum_vector() for API parity with the MPI build —
// previously any caller using it failed to compile in serial mode.
// (init() has no serial counterpart because its parameter is an MPI type.)
class Parallel {
   public:
    static bool is_master() { return true; }
    static size_t get_id() { return 0; }
    static size_t get_n() { return 1; }
    static std::string get_host() { return "localhost"; }
    static void barrier() {}
    static void print_info() { printf("Running in serial.\n"); }
    // Summing over a single rank is the identity: leave t untouched.
    template <class T>
    static void reduce_to_sum(T& t) {}
    template <class T>
    static void reduce_to_sum_vector(std::vector<T>& t) {}
};
#endif // SERIAL
#endif |
lexer.c | #include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdio.h>
#include <math.h>
#include "lexer.h"
/* Single characters that may begin or extend an operator token. */
static char OPERATOR_CHARS[] = {
    '+', '-', '*', '/', '^', '=', '|', '&', '~', ':', '%', '<', '>', '?', '!'
};
/* Known operators, multi-character entries first so the greedy split in
 * lex_pushback prefers "++" over "+".
 * Fix: the original table was missing a comma after "!", so adjacent string
 * literals "!" "<" concatenated into the single bogus entry "!<" and neither
 * "!" nor "<" ever matched on its own. */
static char* OPERATOR_LIST[] = {
    "++", "--",
    "+=", "-=", "*=", "/=", "^=",
    "<=", ">=",
    "+", "-", "*", "/", "^", "%", "!",
    "<", ">", "&", "|", "~"
};
/* Return nonzero iff c is one of the characters in OPERATOR_CHARS,
 * i.e. a character that can begin or extend an operator token. */
static int isOperator(char c) {
    return memchr(OPERATOR_CHARS, c, sizeof(OPERATOR_CHARS)) != NULL;
}
static lex_array_t *lex_alloc(int capacity) {
lex_array_t *out = malloc(sizeof(lex_array_t) + sizeof(token_t*) * capacity);
if (out == NULL) {
printf("Memory allocation failed in %s on line %d\n", __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
out->tokens = (token_t**)(((char*)out) + sizeof(lex_array_t));
out->size = 0;
out->capacity = capacity;
return out;
}
/* Append a token covering string[begin..end) to *lexArray, doubling the
 * array when full.  Token text is lower-cased on copy; token struct and its
 * string share one allocation (freed as a unit in lex_free).
 *
 * For _TYPE_OPERATOR_UNSPLIT the character run is first greedily matched
 * against OPERATOR_LIST and each match pushed recursively as TYPE_OPERATOR;
 * any unmatched remainder falls through to the generic push below.
 * NOTE(review): the list scan does not restart after a match, so an
 * earlier-listed operator following a later-listed one is left unsplit —
 * confirm whether that is intended.
 *
 * Fix: the lex_array_t pointer is now loaded AFTER the recursive split
 * pushes.  The original loaded it first; a recursive push could trigger the
 * realloc below and move the array, leaving this frame's `array` dangling
 * (use-after-free) when the fall-through push executed. */
static void lex_pushback(lex_array_t **lexArray, const char *string, int begin, int end, token_type_t type) {
    if (type == _TYPE_OPERATOR_UNSPLIT) {
        for (int i = 0; i < (sizeof(OPERATOR_LIST) / sizeof(char*)); i++) {
            int opSize = strlen(OPERATOR_LIST[i]);
            if (strncmp(string + begin, OPERATOR_LIST[i], opSize) == 0) {
                lex_pushback(lexArray, string, begin, begin + opSize, TYPE_OPERATOR);
                begin += opSize;
            }
            if (begin >= end) {
                return;
            }
        }
    }
    /* Load after any recursion above may have realloc'd the array. */
    lex_array_t *array = *lexArray;
    if (array->size >= array->capacity) {
        int newCapacity = array->capacity * 2;
        array = realloc(array, sizeof(lex_array_t) + sizeof(token_t*) * newCapacity);
        if (array == NULL) {
            printf("Memory allocation failed in %s on line %d\n", __FILE__, __LINE__);
            exit(EXIT_FAILURE);
        }
        /* tokens points into the same block, so re-base it after realloc. */
        array->tokens = (token_t**)(((char*)array) + sizeof(lex_array_t));
        array->capacity = newCapacity;
        *lexArray = array;
    }
    int stringSize = end - begin;
    token_t *token = malloc(sizeof(token_t) + sizeof(char) * (stringSize + 1));
    if (token == NULL) {
        printf("Memory allocation failed in %s on line %d\n", __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    token->type = type;
    token->string = ((char*)token) + sizeof(token_t);
    for (int i = 0; i < stringSize; i++) {
        token->string[i] = tolower(string[i + begin]);
    }
    token->string[stringSize] = 0;
    array->tokens[array->size] = token;
    array->size++;
}
/* Tokenize the NUL-terminated input string into a freshly allocated
 * lex_array_t (caller frees with lex_free).
 *
 * Implementation: a character-driven state machine.  `state` tracks the kind
 * of token being scanned, `begin` its start index, and `beginChar` the
 * opening quote for strings.  When a character ends the current token, the
 * token is flushed via lex_pushback and control jumps back to `loop:` so the
 * same character is re-examined in NO_STATE.
 *
 * NOTE(review): operator runs are flushed as TYPE_OPERATOR without going
 * through the _TYPE_OPERATOR_UNSPLIT splitting path — confirm intended.
 * NOTE(review): when '#' starts a comment while state is NO_STATE or
 * STRING_ESCAPE, the switch below leaves `type` uninitialized and a bogus
 * token is pushed — confirm '#' cannot occur in those states, or guard it.
 * NOTE(review): NUMBER_HEX has no case in the main state switch, so a hex
 * literal is only terminated by whitespace/'#'/EOF, not by operators. */
lex_array_t *lex(const char *input) {
    lex_array_t *out = lex_alloc(16);
    /* Scanner states; NO_STATE means "between tokens". */
    enum {
        NO_STATE = 0,
        NAME,
        NUMBER,
        NUMBER_W_DECIMAL,
        NUMBER_HEX,
        STRING,
        STRING_ESCAPE,
        OPERATOR,
        COMMENT
    };
    int state = NO_STATE;
    int begin = 0;      /* start index of the token currently being scanned */
    char beginChar = 0; /* opening quote char, to match the closing one */
    int i;
    for (i = 0; input[i]; i++) {
        char cur = input[i];
        loop: /* re-entry point: re-process `cur` after flushing a token */
        if (state == COMMENT) {
            /* Comments run to end of line and produce no token. */
            if (cur == '\n' || cur == '\r') {
                state = NO_STATE;
                continue;
            }
            continue;
        } else if (cur == '#') {
            /* '#' starts a comment; flush whatever token was in progress. */
            token_type_t type;
            switch (state) {
                case NUMBER:
                case NUMBER_W_DECIMAL:
                case NUMBER_HEX:
                    type = TYPE_NUMBER_LITERAL;
                    break;
                case STRING:
                    type = TYPE_STRING_LITERAL;
                    break;
                case OPERATOR:
                    type = TYPE_OPERATOR;
                    break;
                case NAME:
                    type = TYPE_IDENTIFIER;
                    break;
            }
            lex_pushback(&out, input, begin, i, type);
            state = COMMENT;
            continue;
        }
        if (state == NO_STATE) {
            /* Single-character tokens are pushed immediately. */
            switch (cur) {
                case '(':
                    lex_pushback(&out, input, i, i + 1, TYPE_L_PAREN);
                    continue;
                case ')':
                    lex_pushback(&out, input, i, i + 1, TYPE_R_PAREN);
                    continue;
                case '{':
                    lex_pushback(&out, input, i, i + 1, TYPE_L_CURLY);
                    continue;
                case '}':
                    lex_pushback(&out, input, i, i + 1, TYPE_R_CURLY);
                    continue;
                case '[':
                    lex_pushback(&out, input, i, i + 1, TYPE_L_BRACKET);
                    continue;
                case ']':
                    lex_pushback(&out, input, i, i + 1, TYPE_R_BRACKET);
                    continue;
                case ',':
                    lex_pushback(&out, input, i, i + 1, TYPE_OPERATOR);
                    continue;
            }
            /* Otherwise the character starts (or skips to) a new token. */
            if (cur == ' ' || cur == '\t' || cur == '\n' || cur == '\r') {
                continue;
            } else if ((cur >= 'a' && cur <= 'z') || (cur >= 'A' && cur <= 'Z') || cur == '_') {
                state = NAME;
            } else if (cur >= '0' && cur <= '9') {
                state = NUMBER;
            } else if (cur == '.') {
                /* Leading '.' starts a decimal literal like ".5". */
                state = NUMBER_W_DECIMAL;
            } else if (cur == '"' || cur == '\'') {
                state = STRING;
            } else if (isOperator(cur)) {
                state = OPERATOR;
            }
            begin = i;
            beginChar = cur;
            continue;
        } else {
            /* Mid-token: either the character extends the token, or the
             * token is flushed and `cur` re-processed via goto loop. */
            switch (state) {
                case NAME:
                    if ((cur >= 'a' && cur <= 'z') || (cur >= 'A' && cur <= 'Z') || cur == '_' || (cur >= '0' && cur <= '9')) {
                        continue;
                    } else {
                        lex_pushback(&out, input, begin, i, TYPE_IDENTIFIER);
                        state = NO_STATE;
                        goto loop;
                    }
                    break;
                case NUMBER:
                    if (cur >= '0' && cur <= '9') {
                        continue;
                    } else if (tolower(cur) == 'x' && begin == (i - 1)) {
                        /* "0x..." — only valid right after the first digit. */
                        state = NUMBER_HEX;
                        continue;
                    } else if (cur == '.') {
                        state = NUMBER_W_DECIMAL;
                        continue;
                    } else {
                        lex_pushback(&out, input, begin, i, TYPE_NUMBER_LITERAL);
                        state = NO_STATE;
                        goto loop;
                    }
                    break;
                case NUMBER_W_DECIMAL:
                    if (cur >= '0' && cur <= '9') {
                        continue;
                    } else {
                        lex_pushback(&out, input, begin, i, TYPE_NUMBER_LITERAL);
                        state = NO_STATE;
                        goto loop;
                    }
                    break;
                case STRING_ESCAPE:
                    /* Consume the escaped character verbatim. */
                    state = STRING;
                    continue;
                case STRING:
                    /* A string ends only on the same quote kind it opened with. */
                    if (cur == '"' && beginChar == '"') {
                        lex_pushback(&out, input, begin, i+1, TYPE_STRING_LITERAL);
                        state = NO_STATE;
                        continue;
                    } else if (cur == '\'' && beginChar == '\'') {
                        lex_pushback(&out, input, begin, i+1, TYPE_STRING_LITERAL);
                        state = NO_STATE;
                        continue;
                    } else if (cur == '\\'){
                        state = STRING_ESCAPE;
                        continue;
                    } else {
                        continue;
                    }
                    break;
                case OPERATOR:
                    if (isOperator(cur)) {
                        continue;
                    } else {
                        lex_pushback(&out, input, begin, i, TYPE_OPERATOR);
                        state = NO_STATE;
                        goto loop;
                    }
                    break;
            }
            /* Reached only for states without a case above (NUMBER_HEX):
             * whitespace flushes the pending token. */
            if (cur == ' ' || cur == '\t' || cur == '\n' || cur == '\r') {
                token_type_t type;
                switch (state) {
                    case NUMBER:
                    case NUMBER_W_DECIMAL:
                    case NUMBER_HEX:
                        type = TYPE_NUMBER_LITERAL;
                        break;
                    case STRING:
                        type = TYPE_STRING_LITERAL;
                        break;
                    case OPERATOR:
                        type = TYPE_OPERATOR;
                        break;
                    case NAME:
                        type = TYPE_IDENTIFIER;
                        break;
                }
                lex_pushback(&out, input, begin, i, type);
                state = NO_STATE;
                continue;
            }
        }
    }
    /* End of input: flush any token still in progress. */
    if (state != NO_STATE) {
        token_type_t type;
        switch (state) {
            case NUMBER:
            case NUMBER_W_DECIMAL:
            case NUMBER_HEX:
                type = TYPE_NUMBER_LITERAL;
                break;
            case STRING:
                type = TYPE_STRING_LITERAL;
                break;
            case OPERATOR:
                type = TYPE_OPERATOR;
                break;
            case NAME:
                type = TYPE_IDENTIFIER;
                break;
        }
        lex_pushback(&out, input, begin, i, type);
    }
    return out;
}
/* Free a lex_array_t and every token it owns.  Each token is its own heap
 * block; the pointer table lives inside the array's allocation, so the final
 * free(array) releases header and table together (see lex_alloc).
 * Fix: dropped the former "#pragma omp parallel for" — spawning a thread
 * team just to call free() on a short token list costs far more than it
 * could ever save. */
void lex_free(lex_array_t *array) {
    for (int i = 0; i < array->size; i++) {
        free(array->tokens[i]);
    }
    free(array);
}
/* Print every token's text to stdout, each followed by a space and the
 * given separator string.  Does not modify the array. */
void lex_dump(const lex_array_t *array, const char *seperator) {
    int idx = 0;
    while (idx < array->size) {
        printf("%s %s", array->tokens[idx]->string, seperator);
        idx++;
    }
}
ptmp.c | /*
* Copyright (c) 2016-2018 Ilya Kaliman
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <err.h>
#ifdef LIBPT_USE_MPI
#include <mpi.h>
#endif
#include "pt.h"
extern void *(*libpt_malloc)(size_t);
extern void (*libpt_free)(void *);
void sgemm_(char *, char *, int *, int *, int *, float *, float *,
int *, float *, int *, float *, float *, int *);
/*
 * Thin wrapper over Fortran BLAS sgemm_: C = alpha*op(A)*op(B) + beta*C.
 * BLAS takes every argument by reference and its prototype is not
 * const-qualified, so const is cast away here; sgemm_ does not write
 * through a or b.
 */
static void
gemm(char transa, char transb, int m, int n, int k, float alpha,
    const float *a, int lda, const float *b, int ldb, float beta,
    float *c, int ldc)
{
	float *a_nc = (float *)a;
	float *b_nc = (float *)b;

	sgemm_(&transa, &transb, &m, &n, &k, &alpha, a_nc, &lda,
	    b_nc, &ldb, &beta, c, &ldc);
}
/*
 * out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c))
 * Same-spin case with the (b,c) pair stored half-packed (b > c).
 */
static void
t2_i_ovvv_half(size_t o, size_t v, size_t i, size_t j, size_t k,
    float *abc, const float *t2, const float *i_ovvv)
{
	gemm('T', 'T', v, v*(v-1)/2, v, 1.0,
	    &t2[(i*o+j)*v*v], v,
	    &i_ovvv[k*v*v*(v-1)/2], v*(v-1)/2, 0.0, abc, v);
}
/*
 * out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c))
 * Mixed-spin (baba) t2 against same-spin (aaaa) half-packed integrals.
 */
static void
t2_baba_i_ovvv_aaaa_half(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, float *abc, const float *t2,
    const float *i_ovvv)
{
	(void)ob; /* unused */
	gemm('T', 'T', vb, va*(va-1)/2, va, 1.0,
	    &t2[(i*oa+j)*vb*va], va,
	    &i_ovvv[k*va*va*(va-1)/2], va*(va-1)/2, 0.0, abc, vb);
}
/*
 * out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c))
 * Same-spin (aaaa) t2 against mixed-spin (baba) integrals; full storage.
 */
static void
t2_aaaa_i_ovvv_baba(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, float *abc, const float *t2,
    const float *i_ovvv)
{
	(void)ob; /* unused */
	gemm('T', 'T', va, va*vb, va, 1.0,
	    &t2[(i*oa+j)*va*va], va,
	    &i_ovvv[k*va*vb*va], va*vb, 0.0, abc, va);
}
/*
 * out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c))
 * Mixed-spin (abab) t2 against mixed-spin (abab) integrals; full storage.
 */
static void
t2_abab_i_ovvv_abab(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, float *abc, const float *t2,
    const float *i_ovvv)
{
	(void)oa; /* unused */
	gemm('T', 'T', va, va*vb, vb, 1.0,
	    &t2[(i*ob+j)*va*vb], vb,
	    &i_ovvv[k*vb*va*vb], va*vb, 0.0, abc, va);
}
/*
 * out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l))
 * Same-spin occupied-index contraction.
 */
static void
t2_i_oovo(size_t o, size_t v, size_t i, size_t j, size_t k,
    float *abc, const float *t2, const float *i_oovo)
{
	gemm('N', 'N', v*v, v, o, 1.0,
	    &t2[i*o*v*v], v*v,
	    &i_oovo[(j*o+k)*o*v], o, 0.0, abc, v*v);
}
/*
 * out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l))
 * Same-spin (aaaa) t2 against mixed-spin (baba) oovo integrals.
 */
static void
t2_aaaa_i_oovo_baba(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, float *abc, const float *t2,
    const float *i_oovo)
{
	(void)ob; /* unused */
	gemm('N', 'N', va*va, vb, oa, 1.0,
	    &t2[i*oa*va*va], va*va,
	    &i_oovo[(j*oa+k)*vb*oa], oa, 0.0, abc, va*va);
}
/*
 * out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l))
 * Mixed-spin (abab) t2 against mixed-spin (abab) oovo integrals.
 */
static void
t2_abab_i_oovo_abab(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, float *abc, const float *t2,
    const float *i_oovo)
{
	(void)oa; /* unused */
	gemm('N', 'N', va*vb, va, ob, 1.0,
	    &t2[i*ob*va*vb], va*vb,
	    &i_oovo[(j*ob+k)*va*ob], ob, 0.0, abc, va*vb);
}
/*
 * out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l))
 * Mixed-spin (baba) t2 against same-spin (aaaa) oovo integrals.
 */
static void
t2_baba_i_oovo_aaaa(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, float *abc, const float *t2,
    const float *i_oovo)
{
	(void)ob; /* unused */
	gemm('N', 'N', va*vb, va, oa, 1.0,
	    &t2[i*oa*vb*va], va*vb,
	    &i_oovo[(j*oa+k)*va*oa], oa, 0.0, abc, va*vb);
}
/*
 * Antisymmetrized disconnected-triples term:
 * sum over the 9 signed permutations of (i,j,k) x (a,b,c) of
 * ov(.,.) * oovv(.,.,.,.).  Row pointers into oovv are hoisted once;
 * the term order matches the original expression exactly, so float
 * rounding is identical.
 */
static float
i_jk_a_bc_ov_oovv(size_t o, size_t v, const float *ov, const float *oovv,
    size_t i, size_t j, size_t k, size_t a, size_t b, size_t c)
{
	const float *jk = &oovv[j*o*v*v+k*v*v];
	const float *ik = &oovv[i*o*v*v+k*v*v];
	const float *ji = &oovv[j*o*v*v+i*v*v];

	return +ov[i*v+a]*jk[b*v+c]
	       -ov[j*v+a]*ik[b*v+c]
	       -ov[k*v+a]*ji[b*v+c]
	       -ov[i*v+b]*jk[a*v+c]
	       +ov[j*v+b]*ik[a*v+c]
	       +ov[k*v+b]*ji[a*v+c]
	       -ov[i*v+c]*jk[b*v+a]
	       +ov[j*v+c]*ik[b*v+a]
	       +ov[k*v+c]*ji[b*v+a];
}
/*
 * One disconnected-triples contribution:
 * t1(i,a)*i_oovv(j,k,b,c) + f_ov(i,a)*t2(j,k,b,c).
 * Both terms use the same flat offsets, computed once.
 */
static float
comp_t3b_ijkabc(size_t v1, size_t o2, size_t v2a, size_t v2b,
    size_t i, size_t j, size_t k, size_t a, size_t b, size_t c,
    const float *t1, const float *i_oovv, const float *f_ov,
    const float *t2)
{
	size_t ia = i*v1 + a;
	size_t jkbc = j*o2*v2a*v2b + k*v2a*v2b + b*v2b + c;

	return t1[ia]*i_oovv[jkbc] + f_ov[ia]*t2[jkbc];
}
/*
 * Perturbative-triples energy contribution, same-spin (aaa) block.
 * Work is distributed round-robin over MPI ranks by triple index, then over
 * OpenMP threads with a dynamic schedule and a reduction on e_pt.
 * Only unique triples i < j < k and a > b > c are enumerated; the six
 * contraction calls below build the connected T3 amplitude piece by piece,
 * with explicit sign bookkeeping for the index permutations.
 */
static double
cc_pt_aaa(size_t oa, size_t va, const float *d_ov, const float *f_ov,
    const float *t1, const float *t2_aaaa, const float *i_oovo_aaaa,
    const float *i_oovv_aaaa, const float *i_ovvv_aaaa)
{
	double e_pt = 0.0;
	int rank = 0, size = 1;
	/* Nothing to do for an empty occupied or virtual space. */
	if (oa == 0 || va == 0)
		return 0.0;
#ifdef LIBPT_USE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
	{
	size_t i, j, k, a, b, c, it, *ijk, nijk = 0;
	float *t3ax1, *abc1;
	/* Per-thread list of (i,j,k) triples owned by this MPI rank.
	 * oa^3 slots is a safe upper bound for the 3*nijk entries used. */
	if ((ijk = libpt_malloc(oa*oa*oa*sizeof(*ijk))) == NULL)
		err(1, "libpt malloc ijk");
	for (i = 0, it = 0; i < oa; i++) {
		for (j = i+1; j < oa; j++) {
			for (k = j+1; k < oa; k++, it++) {
				/* Round-robin distribution over MPI ranks. */
				if ((int)it % size == rank) {
					ijk[3*nijk+0] = i;
					ijk[3*nijk+1] = j;
					ijk[3*nijk+2] = k;
					nijk++;
				}
			}
		}
	}
	/* Scratch: t3ax1 accumulates the T3 amplitude, abc1 holds the
	 * result of each individual contraction; both va^3 floats. */
	if ((t3ax1 = libpt_malloc(2*va*va*va*sizeof(*t3ax1))) == NULL)
		err(1, "libpt malloc work");
	abc1 = t3ax1 + va*va*va;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
	for (it = 0; it < nijk; it++) {
		i = ijk[3*it+0];
		j = ijk[3*it+1];
		k = ijk[3*it+2];
		/* ovvv contractions for the three (i,j,k) permutations,
		 * unpacked from half storage with antisymmetrized signs. */
		t2_i_ovvv_half(oa,va,i,j,k,abc1,t2_aaaa,i_ovvv_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] =
			    +abc1[a*(a-1)/2*va+b*va+c]
			    -abc1[a*(a-1)/2*va+c*va+b]
			    +abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,i,k,j,abc1,t2_aaaa,i_ovvv_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,k,j,i,abc1,t2_aaaa,i_ovvv_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		/* oovo contractions for the three (i,j,k) permutations. */
		t2_i_oovo(oa,va,i,j,k,abc1,t2_aaaa,i_oovo_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] +=
			    +abc1[a*va*va+b*va+c]
			    -abc1[b*va*va+a*va+c]
			    -abc1[c*va*va+b*va+a];
		t2_i_oovo(oa,va,j,i,k,abc1,t2_aaaa,i_oovo_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
		t2_i_oovo(oa,va,k,j,i,abc1,t2_aaaa,i_oovo_aaaa);
		/* Final pass: finish T3, build the disconnected piece, and
		 * accumulate the energy denominator-weighted product. */
		for (a = 0; a < va; a++) {
		for (b = 0; b < a; b++) {
		for (c = 0; c < b; c++) {
			float t3ax, t3bx, dn;
			t3ax1[a*va*va+b*va+c] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
			dn = d_ov[i*va+a] + d_ov[j*va+b] + d_ov[k*va+c];
			t3ax = t3ax1[a*va*va+b*va+c];
			t3bx = +i_jk_a_bc_ov_oovv(oa,va,t1,i_oovv_aaaa,i,j,k,a,b,c)
			       +i_jk_a_bc_ov_oovv(oa,va,f_ov,t2_aaaa,i,j,k,a,b,c);
			e_pt += t3ax * (t3ax-t3bx) / dn;
		}}}
	}
	libpt_free(ijk);
	libpt_free(t3ax1);
	}
	return (e_pt);
}
/*
 * Perturbative-triples energy contribution, mixed-spin (aab) block:
 * two alpha occupied indices i < j and one beta index k.  Structure mirrors
 * cc_pt_aaa: MPI round-robin over triples, OpenMP dynamic loop with an e_pt
 * reduction, contraction calls with permutation sign bookkeeping.
 */
static double
cc_pt_aab(size_t oa, size_t va, size_t ob, size_t vb,
    const float *d_ov_aa, const float *d_ov_bb,
    const float *f_ov_aa, const float *f_ov_bb,
    const float *t1_aa, const float *t1_bb,
    const float *t2_aaaa, const float *t2_abab, const float *t2_baba,
    const float *i_oovo_aaaa, const float *i_oovo_abab,
    const float *i_oovo_baba, const float *i_oovv_aaaa,
    const float *i_oovv_abab, const float *i_ovvv_aaaa,
    const float *i_ovvv_abab, const float *i_ovvv_baba)
{
	double e_pt = 0.0;
	int rank = 0, size = 1;
	/* Nothing to do if any spin space is empty. */
	if (oa == 0 || va == 0 || ob == 0 || vb == 0)
		return 0.0;
#ifdef LIBPT_USE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
	{
	size_t i, j, k, a, b, c, it, *ijk, nijk = 0;
	float *t3ax1, *abc1, *abc11, *abc12;
	/* 2*oa*oa*ob slots bound the 3*nijk entries (nijk <= oa*(oa-1)/2*ob). */
	if ((ijk = libpt_malloc(2*oa*oa*ob*sizeof(*ijk))) == NULL)
		err(1, "libpt malloc ijk");
	for (i = 0, it = 0; i < oa; i++) {
		for (j = i+1; j < oa; j++) {
			for (k = 0; k < ob; k++, it++) {
				if ((int)it % size == rank) {
					ijk[3*nijk+0] = i;
					ijk[3*nijk+1] = j;
					ijk[3*nijk+2] = k;
					nijk++;
				}
			}
		}
	}
	/* Scratch layout inside one 2*va*va*vb block: abc1 and abc11 alias
	 * the same region (used at different times); abc12 follows abc11's
	 * half-packed region. */
	if ((t3ax1 = libpt_malloc(2*va*va*vb*sizeof(*t3ax1))) == NULL)
		err(1, "libpt malloc work");
	abc1 = t3ax1 + va*va*vb;
	abc11 = t3ax1 + va*va*vb;
	abc12 = t3ax1 + va*va*vb + vb*va*(va-1)/2;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
	for (it = 0; it < nijk; it++) {
		i = ijk[3*it+0];
		j = ijk[3*it+1];
		k = ijk[3*it+2];
		/* ovvv contractions across the mixed-spin permutations. */
		t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,t2_aaaa,i_ovvv_baba);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] =
			    -abc1[a+b*va+c*va*va]
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,t2_abab,i_ovvv_abab);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,t2_abab,i_ovvv_abab);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    +abc1[a+c*va+b*va*vb]
			    -abc1[b+c*va+a*va*vb];
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,t2_baba,i_ovvv_aaaa);
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,t2_baba,i_ovvv_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc11[c+vb*a*(a-1)/2+vb*b]
			    +abc12[c+vb*a*(a-1)/2+vb*b];
		/* oovo contractions across the mixed-spin permutations. */
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,t2_aaaa,i_oovo_baba);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[b+a*va+c*va*va];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,t2_aaaa,i_oovo_baba);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,t2_abab,i_oovo_abab);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[c+a*vb+b*vb*va]
			    +abc1[c+b*vb+a*vb*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,t2_abab,i_oovo_abab);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[c+b*vb+a*vb*va]
			    +abc1[c+a*vb+b*vb*va];
		t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,t2_baba,i_oovo_aaaa);
		/* Final pass: finish T3, build the disconnected piece from
		 * t1/f_ov amplitudes, and accumulate the energy. */
		for (a = 0; a < va; a++) {
		for (b = 0; b < a; b++) {
		for (c = 0; c < vb; c++) {
			float t3ax, t3bx, dn;
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
			t3bx = +comp_t3b_ijkabc(va,ob,va,vb,i,j,k,a,b,c,
			    t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
			       -comp_t3b_ijkabc(va,ob,va,vb,i,j,k,b,a,c,
			    t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
			       -comp_t3b_ijkabc(va,ob,va,vb,j,i,k,a,b,c,
			    t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
			       +comp_t3b_ijkabc(va,ob,va,vb,j,i,k,b,a,c,
			    t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
			       +comp_t3b_ijkabc(vb,oa,va,va,k,j,i,c,b,a,
			    t1_bb,i_oovv_aaaa,f_ov_bb,t2_aaaa);
			dn = d_ov_aa[i*va+a] + d_ov_aa[j*va+b] + d_ov_bb[k*vb+c];
			t3ax = t3ax1[a*va*vb+b*vb+c];
			e_pt += t3ax * (t3ax-t3bx) / dn;
		}}}
	}
	libpt_free(ijk);
	libpt_free(t3ax1);
	}
	return (e_pt);
}
/*
 * Restricted (closed-shell) perturbative-triples energy, mixed precision.
 * The flat input arrays are partitioned into same-spin (aaaa) and
 * mixed-spin (abab) slabs by fixed offsets; since alpha and beta spaces are
 * identical here, the aab block reuses the alpha quantities for both spins
 * and the result is doubled at the end.
 */
double
libpt_rpt_mp(size_t oa, size_t va, const float *d_ov, const float *f_ov,
    const float *t1, const float *t2, const float *i_oovo,
    const float *i_oovv, const float *i_ovvv)
{
	double e_pt = 0.0;
	/* Slab offsets: aaaa block first, abab block follows. */
	const float *t2_aaaa = t2;
	const float *t2_abab = t2 + oa*oa*va*va;
	const float *i_ovvv_aaaa = i_ovvv;
	/* ovvv aaaa is stored half-packed over the (virtual,virtual) pair. */
	const float *i_ovvv_abab = i_ovvv + oa*va*va*(va-1)/2;
	const float *i_oovo_aaaa = i_oovo;
	const float *i_oovo_abab = i_oovo + oa*oa*oa*va;
	const float *i_oovv_aaaa = i_oovv;
	const float *i_oovv_abab = i_oovv + oa*oa*va*va;
	e_pt += cc_pt_aaa(oa, va, d_ov, f_ov, t1, t2_aaaa,
	    i_oovo_aaaa, i_oovv_aaaa, i_ovvv_aaaa);
	e_pt += cc_pt_aab(oa, va, oa, va, d_ov, d_ov, f_ov, f_ov, t1, t1,
	    t2_aaaa, t2_abab, t2_abab, i_oovo_aaaa, i_oovo_abab, i_oovo_abab,
	    i_oovv_aaaa, i_oovv_abab, i_ovvv_aaaa, i_ovvv_abab, i_ovvv_abab);
#ifdef LIBPT_USE_MPI
	/* Each rank computed a disjoint share of triples; sum them all. */
	MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
	    MPI_SUM, MPI_COMM_WORLD);
#endif
	/* Spin symmetry: beta contributions equal alpha, hence the factor 2. */
	return 2.0 * e_pt;
}
/*
 * Unrestricted (open-shell) perturbative-triples energy, mixed precision.
 * Each flat input is partitioned into per-spin-case slabs (aa/bb for
 * one-index quantities; aaaa/abab/bbbb/baba for two-electron ones) and all
 * four spin blocks (aaa, bbb, aab, bba) are summed; the bba block is the
 * aab kernel with alpha and beta arguments swapped.
 */
double
libpt_upt_mp(size_t oa, size_t va, size_t ob, size_t vb, const float *d_ov,
    const float *f_ov, const float *t1, const float *t2,
    const float *i_oovo, const float *i_oovv, const float *i_ovvv)
{
	double e_pt = 0.0;
	/* Slab offsets: alpha block first, beta block follows. */
	const float *d_ov_aa = d_ov;
	const float *d_ov_bb = d_ov_aa + oa*va;
	const float *f_ov_aa = f_ov;
	const float *f_ov_bb = f_ov_aa + oa*va;
	const float *t1_aa = t1;
	const float *t1_bb = t1_aa + oa*va;
	/* Two-electron slabs in aaaa, abab, bbbb, baba order. */
	const float *t2_aaaa = t2;
	const float *t2_abab = t2_aaaa + oa*oa*va*va;
	const float *t2_bbbb = t2_abab + oa*ob*va*vb;
	const float *t2_baba = t2_bbbb + ob*ob*vb*vb;
	const float *i_oovo_aaaa = i_oovo;
	const float *i_oovo_abab = i_oovo_aaaa + oa*oa*va*oa;
	const float *i_oovo_bbbb = i_oovo_abab + oa*ob*va*ob;
	const float *i_oovo_baba = i_oovo_bbbb + ob*ob*vb*ob;
	const float *i_oovv_aaaa = i_oovv;
	const float *i_oovv_abab = i_oovv_aaaa + oa*oa*va*va;
	const float *i_oovv_bbbb = i_oovv_abab + oa*ob*va*vb;
	const float *i_oovv_baba = i_oovv_bbbb + ob*ob*vb*vb;
	const float *i_ovvv_aaaa = i_ovvv;
	/* Same-spin ovvv blocks are half-packed over the virtual pair. */
	const float *i_ovvv_abab = i_ovvv_aaaa + oa*va*va*(va-1)/2;
	const float *i_ovvv_bbbb = i_ovvv_abab + oa*vb*va*vb;
	const float *i_ovvv_baba = i_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
	/* aaaaaa */
	e_pt += cc_pt_aaa(oa, va, d_ov_aa, f_ov_aa, t1_aa, t2_aaaa,
	    i_oovo_aaaa, i_oovv_aaaa, i_ovvv_aaaa);
	/* bbbbbb */
	e_pt += cc_pt_aaa(ob, vb, d_ov_bb, f_ov_bb, t1_bb, t2_bbbb,
	    i_oovo_bbbb, i_oovv_bbbb, i_ovvv_bbbb);
	/* aabaab */
	e_pt += cc_pt_aab(oa, va, ob, vb, d_ov_aa, d_ov_bb, f_ov_aa, f_ov_bb,
	    t1_aa, t1_bb, t2_aaaa, t2_abab, t2_baba, i_oovo_aaaa, i_oovo_abab,
	    i_oovo_baba, i_oovv_aaaa, i_oovv_abab, i_ovvv_aaaa, i_ovvv_abab,
	    i_ovvv_baba);
	/* bbabba */
	e_pt += cc_pt_aab(ob, vb, oa, va, d_ov_bb, d_ov_aa, f_ov_bb, f_ov_aa,
	    t1_bb, t1_aa, t2_bbbb, t2_baba, t2_abab, i_oovo_bbbb, i_oovo_baba,
	    i_oovo_abab, i_oovv_bbbb, i_oovv_baba, i_ovvv_bbbb, i_ovvv_baba,
	    i_ovvv_abab);
#ifdef LIBPT_USE_MPI
	/* Each rank computed a disjoint share of triples; sum them all. */
	MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
	    MPI_SUM, MPI_COMM_WORLD);
#endif
	return e_pt;
}
/*
 * Lambda-based triples correction, same-spin (aaa) block.
 * Same parallel structure as cc_pt_aaa (MPI round-robin over i<j<k triples,
 * OpenMP dynamic loop with an e_pt reduction), but builds two packed
 * triangular amplitudes over a>b>c: sigvvvl from the lambda (l2) side and
 * sigvvvr from the t2 side, then accumulates (sigvvvl - l1t) * sigvvvr / dn.
 */
static double
cc_ft_aaa(size_t oa, size_t va, const float *d_ov, const float *f2_ov,
    const float *l1, const float *t2, const float *l2, const float *i_oovv,
    const float *i2_t2f2_oovo, const float *i3_ovvv, const float *i6_oovo,
    const float *i7_ovvv)
{
	double e_pt = 0.0;
	int rank = 0, size = 1;
	/* Nothing to do for an empty occupied or virtual space. */
	if (oa == 0 || va == 0)
		return 0.0;
#ifdef LIBPT_USE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
	{
	size_t i, j, k, a, b, c, t, it, *ijk, nijk = 0;
	float *sigvvvl, *sigvvvr, *abc1;
	/* Per-thread list of (i,j,k) triples owned by this MPI rank. */
	if ((ijk = libpt_malloc(oa*oa*oa*sizeof(*ijk))) == NULL)
		err(1, "libpt malloc ijk");
	for (i = 0, it = 0; i < oa; i++) {
		for (j = i+1; j < oa; j++) {
			for (k = j+1; k < oa; k++, it++) {
				if ((int)it % size == rank) {
					ijk[3*nijk+0] = i;
					ijk[3*nijk+1] = j;
					ijk[3*nijk+2] = k;
					nijk++;
				}
			}
		}
	}
	/* Scratch: sigvvvl/sigvvvr are packed triangles (t indexes a>b>c),
	 * abc1 is the full per-contraction buffer; all in one block. */
	if ((sigvvvl = libpt_malloc(2*va*va*va*sizeof(*sigvvvl))) == NULL)
		err(1, "libpt malloc work");
	sigvvvr = sigvvvl + va*va*(va-1)/2;
	abc1 = sigvvvl + va*va*va;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
	for (it = 0; it < nijk; it++) {
		i = ijk[3*it+0];
		j = ijk[3*it+1];
		k = ijk[3*it+2];
		/* Lambda-side (l2) contractions into sigvvvl. */
		t2_i_ovvv_half(oa,va,i,j,k,abc1,l2,i7_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] =
			    +abc1[a*(a-1)/2*va+b*va+c]
			    -abc1[a*(a-1)/2*va+c*va+b]
			    +abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,k,j,i,abc1,l2,i7_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,i,k,j,abc1,l2,i7_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		t2_i_oovo(oa,va,i,j,k,abc1,l2,i6_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    +abc1[a*va*va+b*va+c]
			    -abc1[b*va*va+a*va+c]
			    -abc1[c*va*va+b*va+a];
		t2_i_oovo(oa,va,j,i,k,abc1,l2,i6_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
		t2_i_oovo(oa,va,k,j,i,abc1,l2,i6_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
		/* t2-side contractions into sigvvvr, same permutation plan. */
		t2_i_ovvv_half(oa,va,i,j,k,abc1,t2,i3_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] =
			    +abc1[a*(a-1)/2*va+b*va+c]
			    -abc1[a*(a-1)/2*va+c*va+b]
			    +abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,k,j,i,abc1,t2,i3_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,i,k,j,abc1,t2,i3_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		t2_i_oovo(oa,va,i,j,k,abc1,t2,i2_t2f2_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] +=
			    +abc1[a*va*va+b*va+c]
			    -abc1[b*va*va+a*va+c]
			    -abc1[c*va*va+b*va+a];
		t2_i_oovo(oa,va,j,i,k,abc1,t2,i2_t2f2_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
		t2_i_oovo(oa,va,k,j,i,abc1,t2,i2_t2f2_oovo);
		/* Final pass: finish sigvvvr, subtract the disconnected
		 * lambda piece, accumulate denominator-weighted energy. */
		for (a = 0, t = 0; a < va; a++) {
		for (b = 0; b < a; b++) {
		for (c = 0; c < b; c++, t++) {
			float dn, l1t;
			sigvvvr[t] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
			dn = d_ov[i*va+a] + d_ov[j*va+b] + d_ov[k*va+c];
			l1t = +i_jk_a_bc_ov_oovv(oa,va,l1,i_oovv,i,j,k,a,b,c)
			      +i_jk_a_bc_ov_oovv(oa,va,f2_ov,l2,i,j,k,a,b,c);
			e_pt += (sigvvvl[t] - l1t) * sigvvvr[t] / dn;
		}}}
	}
	libpt_free(ijk);
	libpt_free(sigvvvl);
	}
	return (e_pt);
}
/*
 * Lambda-based triples correction, mixed-spin (aab) block: two alpha
 * occupied indices i < j and one beta index k.  Mirrors cc_pt_aab's
 * parallel structure while building two packed amplitudes (a > b, full c):
 * sigvvvl from the lambda (l2) side and sigvvvr from the t2 side, then
 * accumulating (sigvvvl - l1t) * sigvvvr / dn.
 */
static double
cc_ft_aab(size_t oa, size_t va, size_t ob, size_t vb,
    const float *d_ov_aa, const float *d_ov_bb,
    const float *f2_ov_aa, const float *f2_ov_bb,
    const float *l1_aa, const float *l1_bb,
    const float *t2_aaaa, const float *t2_abab, const float *t2_baba,
    const float *l2_aaaa, const float *l2_abab, const float *l2_baba,
    const float *i_oovv_aaaa, const float *i_oovv_abab,
    const float *i2_t2f2_oovo_aaaa, const float *i2_t2f2_oovo_abab,
    const float *i2_t2f2_oovo_baba,
    const float *i3_ovvv_aaaa, const float *i3_ovvv_abab,
    const float *i3_ovvv_baba,
    const float *i6_oovo_aaaa, const float *i6_oovo_abab,
    const float *i6_oovo_baba,
    const float *i7_ovvv_aaaa, const float *i7_ovvv_abab,
    const float *i7_ovvv_baba)
{
	double e_pt = 0.0;
	int rank = 0, size = 1;
	/* Nothing to do if any spin space is empty. */
	if (oa == 0 || va == 0 || ob == 0 || vb == 0)
		return 0.0;
#ifdef LIBPT_USE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
	{
	size_t i, j, k, a, b, c, t, it, *ijk, nijk = 0;
	float *sigvvvl, *sigvvvr, *abc1, *abc11, *abc12;
	/* Per-thread list of (i,j,k) triples owned by this MPI rank. */
	if ((ijk = libpt_malloc(2*oa*oa*ob*sizeof(*ijk))) == NULL)
		err(1, "libpt malloc ijk");
	for (i = 0, it = 0; i < oa; i++) {
		for (j = i+1; j < oa; j++) {
			for (k = 0; k < ob; k++, it++) {
				if ((int)it % size == rank) {
					ijk[3*nijk+0] = i;
					ijk[3*nijk+1] = j;
					ijk[3*nijk+2] = k;
					nijk++;
				}
			}
		}
	}
	/* Scratch in one 2*va*va*vb block: sigvvvl and sigvvvr are packed
	 * (a > b, full c) triangles; abc1/abc11 alias the second half (used
	 * at different times) with abc12 after abc11's packed region. */
	if ((sigvvvl = libpt_malloc(2*va*va*vb*sizeof(*sigvvvl))) == NULL)
		err(1, "libpt malloc work");
	sigvvvr = sigvvvl + vb*va*(va-1)/2;
	abc1 = sigvvvl + va*va*vb;
	abc11 = sigvvvl + va*va*vb;
	abc12 = sigvvvl + va*va*vb + vb*va*(va-1)/2;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
	for (it = 0; it < nijk; it++) {
		i = ijk[3*it+0];
		j = ijk[3*it+1];
		k = ijk[3*it+2];
		/* Lambda-side (l2) contractions into sigvvvl. */
		t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,l2_aaaa,i7_ovvv_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] =
			    -abc1[a+b*va+c*va*va]
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,l2_abab,i7_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,l2_abab,i7_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    +abc1[a+c*va+b*va*vb]
			    -abc1[b+c*va+a*va*vb];
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,l2_baba,i7_ovvv_aaaa);
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,l2_baba,i7_ovvv_aaaa);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc11[c+vb*a*(a-1)/2+vb*b]
			    +abc12[c+vb*a*(a-1)/2+vb*b];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,l2_aaaa,i6_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[b+a*va+c*va*va];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,l2_aaaa,i6_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,l2_abab,i6_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[c+a*vb+b*vb*va]
			    +abc1[c+b*vb+a*vb*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,l2_abab,i6_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[c+b*vb+a*vb*va]
			    +abc1[c+a*vb+b*vb*va];
		t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,l2_baba,i6_oovo_aaaa);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		/* t2-side contractions into sigvvvr, same permutation plan. */
		t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,t2_aaaa,i3_ovvv_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] =
			    -abc1[a+b*va+c*va*va]
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,t2_abab,i3_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,t2_abab,i3_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    +abc1[a+c*va+b*va*vb]
			    -abc1[b+c*va+a*va*vb];
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,t2_baba,i3_ovvv_aaaa);
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,t2_baba,i3_ovvv_aaaa);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc11[c+vb*a*(a-1)/2+vb*b]
			    +abc12[c+vb*a*(a-1)/2+vb*b];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,t2_aaaa,i2_t2f2_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc1[b+a*va+c*va*va];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,t2_aaaa,i2_t2f2_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,t2_abab,i2_t2f2_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc1[c+a*vb+b*vb*va]
			    +abc1[c+b*vb+a*vb*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,t2_abab,i2_t2f2_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc1[c+b*vb+a*vb*va]
			    +abc1[c+a*vb+b*vb*va];
		t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,t2_baba,i2_t2f2_oovo_aaaa);
		/* Final pass: finish sigvvvr, subtract the disconnected
		 * lambda piece, accumulate denominator-weighted energy. */
		for (a = 0, t = 0; a < va; a++) {
		for (b = 0; b < a; b++) {
		for (c = 0; c < vb; c++, t++) {
			float dn, l1t;
			sigvvvr[t] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
			l1t = +comp_t3b_ijkabc(va,ob,va,vb,i,j,k,a,b,c,
			    l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			      -comp_t3b_ijkabc(va,ob,va,vb,i,j,k,b,a,c,
			    l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			      -comp_t3b_ijkabc(va,ob,va,vb,j,i,k,a,b,c,
			    l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			      +comp_t3b_ijkabc(va,ob,va,vb,j,i,k,b,a,c,
			    l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			      +comp_t3b_ijkabc(vb,oa,va,va,k,j,i,c,b,a,
			    l1_bb,i_oovv_aaaa,f2_ov_bb,l2_aaaa);
			dn = d_ov_aa[i*va+a] + d_ov_aa[j*va+b] + d_ov_bb[k*vb+c];
			e_pt += (sigvvvl[t] - l1t) * sigvvvr[t] / dn;
		}}}
	}
	libpt_free(ijk);
	libpt_free(sigvvvl);
	}
	return (e_pt);
}
/*
 * Restricted (spin-free) perturbative-triples (ft) correction, mixed
 * precision (float amplitudes, double accumulation).
 *
 * The packed input arrays hold the alpha-alpha block followed by the
 * alpha-beta block; since the calculation is spin-restricted, the beta
 * blocks coincide with the alpha ones, so cc_ft_aab is called with the
 * same alpha pointers where beta data would otherwise go.
 *
 * Returns 2 * (aaa + aab) energy sum; the factor 2 accounts for the
 * spin-degenerate bbb and bba channels in the restricted case.
 */
double
libpt_rft_mp(size_t oa, size_t va, const float *d_ov, const float *f2_ov,
const float *l1, const float *t2, const float *l2, const float *i_oovv,
const float *i2_t2f2_oovo, const float *i3_ovvv, const float *i6_oovo,
const float *i7_ovvv)
{
double e_pt = 0.0;
/* Slice each packed array into its aaaa and abab spin blocks.
 * Offsets must match the packing used by the caller. */
const float *t2_aaaa = t2;
const float *t2_abab = t2 + oa*oa*va*va;
const float *l2_aaaa = l2;
const float *l2_abab = l2 + oa*oa*va*va;
const float *i_oovv_aaaa = i_oovv;
const float *i_oovv_abab = i_oovv + oa*oa*va*va;
const float *i2_t2f2_oovo_aaaa = i2_t2f2_oovo;
const float *i2_t2f2_oovo_abab = i2_t2f2_oovo + oa*oa*oa*va;
/* The aaaa ovvv blocks are stored triangularly packed in the two
 * virtual indices, hence the va*(va-1)/2 offset. */
const float *i3_ovvv_aaaa = i3_ovvv;
const float *i3_ovvv_abab = i3_ovvv + oa*va*va*(va-1)/2;
const float *i6_oovo_aaaa = i6_oovo;
const float *i6_oovo_abab = i6_oovo + oa*oa*oa*va;
const float *i7_ovvv_aaaa = i7_ovvv;
const float *i7_ovvv_abab = i7_ovvv + oa*va*va*(va-1)/2;
/* aaa channel. */
e_pt += cc_ft_aaa(oa, va, d_ov, f2_ov, l1, t2_aaaa, l2_aaaa,
    i_oovv_aaaa, i2_t2f2_oovo_aaaa, i3_ovvv_aaaa, i6_oovo_aaaa,
    i7_ovvv_aaaa);
/* aab channel; beta arguments reuse the alpha blocks (restricted case). */
e_pt += cc_ft_aab(oa, va, oa, va, d_ov, d_ov, f2_ov, f2_ov,
    l1, l1, t2_aaaa, t2_abab, t2_abab, l2_aaaa, l2_abab, l2_abab,
    i_oovv_aaaa, i_oovv_abab, i2_t2f2_oovo_aaaa, i2_t2f2_oovo_abab,
    i2_t2f2_oovo_abab, i3_ovvv_aaaa, i3_ovvv_abab, i3_ovvv_abab,
    i6_oovo_aaaa, i6_oovo_abab, i6_oovo_abab,
    i7_ovvv_aaaa, i7_ovvv_abab, i7_ovvv_abab);
#ifdef LIBPT_USE_MPI
/* Each rank computed a partial energy; sum across all ranks in place. */
MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
    MPI_SUM, MPI_COMM_WORLD);
#endif
return 2.0 * e_pt;
}
/*
 * Unrestricted perturbative-triples (ft) correction, mixed precision.
 *
 * All inputs are packed spin-block arrays; this function only slices
 * them and dispatches to the four spin channels (aaa, bbb, aab, bba).
 * The slice offsets below encode the packing order
 * aaaa, abab, bbbb, baba and MUST match the caller's layout.
 */
double
libpt_uft_mp(size_t oa, size_t va, size_t ob, size_t vb, const float *d_ov,
const float *f2_ov, const float *l1, const float *t2, const float *l2,
const float *i_oovv, const float *i2_t2f2_oovo, const float *i3_ovvv,
const float *i6_oovo, const float *i7_ovvv)
{
double e_pt = 0.0;
/* One-index quantities: alpha block then beta block. */
const float *d_ov_aa = d_ov;
const float *d_ov_bb = d_ov_aa + oa*va;
const float *f2_ov_aa = f2_ov;
const float *f2_ov_bb = f2_ov_aa + oa*va;
const float *l1_aa = l1;
const float *l1_bb = l1_aa + oa*va;
/* Two-body amplitudes: aaaa, abab, bbbb, baba. */
const float *t2_aaaa = t2;
const float *t2_abab = t2_aaaa + oa*oa*va*va;
const float *t2_bbbb = t2_abab + oa*ob*va*vb;
const float *t2_baba = t2_bbbb + ob*ob*vb*vb;
const float *l2_aaaa = l2;
const float *l2_abab = l2_aaaa + oa*oa*va*va;
const float *l2_bbbb = l2_abab + oa*ob*va*vb;
const float *l2_baba = l2_bbbb + ob*ob*vb*vb;
const float *i_oovv_aaaa = i_oovv;
const float *i_oovv_abab = i_oovv_aaaa + oa*oa*va*va;
const float *i_oovv_bbbb = i_oovv_abab + oa*ob*va*vb;
const float *i_oovv_baba = i_oovv_bbbb + ob*ob*vb*vb;
const float *i2_t2f2_oovo_aaaa = i2_t2f2_oovo;
const float *i2_t2f2_oovo_abab = i2_t2f2_oovo_aaaa + oa*oa*va*oa;
const float *i2_t2f2_oovo_bbbb = i2_t2f2_oovo_abab + oa*ob*va*ob;
const float *i2_t2f2_oovo_baba = i2_t2f2_oovo_bbbb + ob*ob*vb*ob;
/* Same-spin ovvv blocks are triangularly packed in the two virtual
 * indices (v*(v-1)/2); mixed-spin blocks are stored full. */
const float *i3_ovvv_aaaa = i3_ovvv;
const float *i3_ovvv_abab = i3_ovvv_aaaa + oa*va*va*(va-1)/2;
const float *i3_ovvv_bbbb = i3_ovvv_abab + oa*vb*va*vb;
const float *i3_ovvv_baba = i3_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
const float *i6_oovo_aaaa = i6_oovo;
const float *i6_oovo_abab = i6_oovo_aaaa + oa*oa*va*oa;
const float *i6_oovo_bbbb = i6_oovo_abab + oa*ob*va*ob;
const float *i6_oovo_baba = i6_oovo_bbbb + ob*ob*vb*ob;
const float *i7_ovvv_aaaa = i7_ovvv;
const float *i7_ovvv_abab = i7_ovvv_aaaa + oa*va*va*(va-1)/2;
const float *i7_ovvv_bbbb = i7_ovvv_abab + oa*vb*va*vb;
const float *i7_ovvv_baba = i7_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
/* aaaaaa */
e_pt += cc_ft_aaa(oa, va, d_ov_aa, f2_ov_aa, l1_aa, t2_aaaa, l2_aaaa,
    i_oovv_aaaa, i2_t2f2_oovo_aaaa, i3_ovvv_aaaa, i6_oovo_aaaa,
    i7_ovvv_aaaa);
/* bbbbbb: same kernel with all beta blocks. */
e_pt += cc_ft_aaa(ob, vb, d_ov_bb, f2_ov_bb, l1_bb, t2_bbbb, l2_bbbb,
    i_oovv_bbbb, i2_t2f2_oovo_bbbb, i3_ovvv_bbbb, i6_oovo_bbbb,
    i7_ovvv_bbbb);
/* aabaab */
e_pt += cc_ft_aab(oa, va, ob, vb, d_ov_aa, d_ov_bb, f2_ov_aa, f2_ov_bb,
    l1_aa, l1_bb, t2_aaaa, t2_abab, t2_baba, l2_aaaa, l2_abab, l2_baba,
    i_oovv_aaaa, i_oovv_abab, i2_t2f2_oovo_aaaa, i2_t2f2_oovo_abab,
    i2_t2f2_oovo_baba, i3_ovvv_aaaa, i3_ovvv_abab, i3_ovvv_baba,
    i6_oovo_aaaa, i6_oovo_abab, i6_oovo_baba,
    i7_ovvv_aaaa, i7_ovvv_abab, i7_ovvv_baba);
/* bbabba: aab kernel with alpha and beta roles swapped. */
e_pt += cc_ft_aab(ob, vb, oa, va, d_ov_bb, d_ov_aa, f2_ov_bb, f2_ov_aa,
    l1_bb, l1_aa, t2_bbbb, t2_baba, t2_abab, l2_bbbb, l2_baba, l2_abab,
    i_oovv_bbbb, i_oovv_baba, i2_t2f2_oovo_bbbb, i2_t2f2_oovo_baba,
    i2_t2f2_oovo_abab, i3_ovvv_bbbb, i3_ovvv_baba, i3_ovvv_abab,
    i6_oovo_bbbb, i6_oovo_baba, i6_oovo_abab,
    i7_ovvv_bbbb, i7_ovvv_baba, i7_ovvv_abab);
#ifdef LIBPT_USE_MPI
/* Sum the per-rank partial energies. */
MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
    MPI_SUM, MPI_COMM_WORLD);
#endif
return e_pt;
}
|
GB_unop__ainv_int64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_int64_int64)
// op(A') function: GB (_unop_tran__ainv_int64_int64)
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -Ax [p] for all entries p; auto-generated template instantiation
// for the AINV (additive inverse) operator on int64 with no typecast.
GrB_Info GB (_unop_apply__ainv_int64_int64)
(
int64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time (GxB_NO_AINV / GxB_NO_INT64);
// caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense/sparse case: every position 0..anz-1 holds a live entry
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op: a plain parallel memcpy suffices (not taken for AINV)
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = -z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions whose bitmap bit says "no entry here"
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -A': transpose A while applying AINV; the actual bucket-transpose
// loop body lives in the shared template GB_unop_transpose.c, which picks
// up the GB_* macros defined above.
GrB_Info GB (_unop_tran__ainv_int64_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rose_v1_ifAndFor.c | //! if statement without an explicit basic block as a body
#include <omp.h>
// Increment every element of a local 100-element array in parallel,
// guarded by a non-braced-friendly if.
// NOTE(review): this is a ROSE source-to-source test fixture — the exact
// shape of the if-around-pragma-for nesting is what the transformation
// test exercises, so the body is left as written on purpose. `a` is read
// uninitialized (a[i] + 1) and the result is discarded on return; both
// are acceptable only because nothing consumes the values.
void foo(int j)
{
int i;
int a[100];
if (j != - 1) {
#pragma omp parallel for private (i)
for (i = 0; i <= 99; i += 1) {
a[i] = a[i] + 1;
}
}
}
|
softplus_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include <math.h>
#ifdef __APPLE__
#include <stdio.h>
#endif
/*
 * Softplus reference kernel (fp32): dst[i] = log(1 + exp(src[i])).
 *
 * Layout is assumed NCHW with dims = {N, C, H, W}; the batch dimension is
 * not iterated here, so this effectively assumes dims[0] == 1 — TODO
 * confirm against callers.
 *
 * Fix vs. original: all three dimensions are now read from input_tensor.
 * The original read `h` from output_tensor while taking `w` and `channels`
 * from input_tensor; reshape() makes the shapes equal, but mixing sources
 * was an inconsistency waiting to bite if the shapes ever diverge.
 *
 * @param input_tensor  source tensor (fp32 data)
 * @param output_tensor destination tensor, same shape as input
 * @param num_thread    OpenMP thread count for the channel loop
 * @return 0 on success
 */
int ref_softplus_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int c_step = h * w; /* elements per channel plane */
    float* input_data = input_tensor->data;
    float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
    {
        float* src = input_data + c_step * q;
        float* dst = out_data + c_step * q;
        for (int i = 0; i < c_step; i++)
        {
            /* NOTE(review): exp() overflows to +inf for large inputs, so
             * dst becomes +inf instead of ~src[i]; kept as-is to match the
             * original reference numerics exactly. */
            dst[i] = log(exp(src[i]) + 1.0f);
        }
    }

    return 0;
}
/* Softplus keeps no per-node execution state: nothing to allocate. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Counterpart of init_node(); nothing was allocated, so nothing to free. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Execute the softplus node: look up its I/O tensors and dispatch on the
 * input data type. Only fp32 is implemented; anything else prints a
 * diagnostic and reports failure (-1). */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* graph = node->graph;
    struct tensor* in = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct tensor* out = get_ir_graph_tensor(graph, node->output_tensors[0]);

    if (in->data_type != TENGINE_DT_FP32)
    {
        printf("Input data type %d not to be supported.\n", in->data_type);
        return -1;
    }

    return ref_softplus_fp32(in, out, exec_graph->num_thread);
}
/* Shape inference for softplus: the output shape is exactly the input
 * shape (element-wise op), so just copy dims across. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* graph = ir_node->graph;
    struct tensor* in = get_ir_graph_tensor(graph, ir_node->input_tensors[0]);
    struct tensor* out = get_ir_graph_tensor(graph, ir_node->output_tensors[0]);

    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
/* Scheduling priority of this reference implementation: CANDO means it can
 * run the op but optimized backends should be preferred when available. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_CANDO;
}
/* Dispatch table for the softplus reference op; prerun/postrun are not
 * needed because the kernel allocates nothing ahead of time. */
static struct node_ops hcl_node_ops = {
.prerun = NULL,
.run = run,
.reshape = reshape,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score
};
/* Register the reference softplus implementation with the op registry. */
int register_softplus_ref_op()
{
return register_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops);
}
/* Remove the reference softplus implementation from the op registry. */
int unregister_softplus_ref_op()
{
return unregister_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops);
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 *
 * Follows the classic GNU libc manual "Elapsed Time" example: *y is used
 * as scratch space and is normalized in place, so callers must not rely
 * on its value afterwards.
 *
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry in the other direction when the usec gap exceeds a second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the order-2, 3D 25-point tiled stencil.
 * Usage: prog Nx Ny Nz Nt — the first three get +8 for ghost layers.
 * NOTE(review): if argc <= 3 then Nx/Ny/Nz, and if argc <= 4 then Nt,
 * are used uninitialized below (UB); the benchmark assumes it is always
 * invoked with all four arguments. */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* Allocate two time planes A[0]/A[1] and the coefficient grid roc2 as
 * pointer-to-pointer jagged arrays.
 * NOTE(review): the first malloc assigned to roc2 is immediately
 * overwritten two lines later and leaks (benchmark code, never freed). */
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
/* NOTE(review): loops start at index 1, so plane/row/column 0 of A and
 * roc2 is never initialized; the tiled loops below can read those cells
 * through the halo — presumably harmless for timing, but the computed
 * values at the boundary are indeterminate. */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Finite-difference coefficients of the 8th-order (25-point) Laplacian. */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
/* Run the stencil TESTS times and keep the fastest wall time. */
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
/* The loop nest below is machine-generated (PLUTO/CLooG time-tiled
 * wavefront code); t1..t5 enumerate tiles, t6..t8 points within a tile,
 * and the t5%2 / (t5+1)%2 indices ping-pong between the two time planes.
 * Do not hand-edit — regenerate from the original stencil instead. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(3*t1-3*t2,2)),ceild(3*t1-2,4)),ceild(24*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(12*t1+Ny+15,16)),floord(24*t2+Ny+11,16)),floord(24*t1-24*t2+Nz+Ny+13,16));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32)),ceild(16*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(12*t1+Nx+15,32)),floord(24*t2+Nx+11,32)),floord(16*t3+Nx+3,32)),floord(24*t1-24*t2+Nz+Nx+13,32));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),4*t3+2),8*t4+6);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
lbv=max(32*t4,4*t5+4);
ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
panama_fmt_plug.c | /* Panama cracker patch for JtR. Hacked together during May of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_panama_;
#elif FMT_REGISTERS_H
john_register_one(&fmt_panama_);
#else
#include <string.h>
#include "arch.h"
#include "sph_panama.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
// 1 - 217k
// 64 - 1930k
// 128 - 2099k
// 256 - 2204k *** set to this level
// 512 - 2203k
// 1k - 2124k
#define OMP_SCALE 256
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Panama"
#define FORMAT_NAME ""
#define FORMAT_TAG "$panama$"
#define TAG_LENGTH 8
#define ALGORITHM_NAME "Panama 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 32
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests panama__tests[] = {
{"049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
{"$panama$049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* One-time format setup: scale keys-per-crypt for OpenMP and allocate the
 * key/result arrays sized to the (possibly scaled) max_keys_per_crypt. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
/* min scales by thread count only; max also gets the OMP_SCALE batch
 * factor so each thread has many candidates per crypt_all call. */
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Accept either "$panama$<64 hex digits>" or a bare 64-hex-digit string.
 * Returns 1 when the ciphertext is well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
    char *p = ciphertext;

    /* Skip the optional "$panama$" tag. */
    if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
        p += TAG_LENGTH;

    /* Exactly 2 hex digits per binary byte. */
    if (strlen(p) != BINARY_SIZE * 2)
        return 0;

    /* Every remaining character must be a hex digit (atoi16 maps
     * non-hex characters to 0x7f). */
    for (; *p; p++)
        if (atoi16[ARCH_INDEX(*p)] == 0x7f)
            return 0;

    return 1;
}
/* Decode the 64 hex digits of a (possibly "$panama$"-tagged) ciphertext
 * into BINARY_SIZE raw bytes. Returns a pointer to a static buffer that
 * is overwritten on every call. */
static void *get_binary(char *ciphertext)
{
    static union {
        unsigned char c[BINARY_SIZE];
        ARCH_WORD dummy; /* forces word alignment of c */
    } buf;
    unsigned char *out = buf.c;
    char *p = ciphertext;
    int i;

    /* When tagged, the hex string starts right after the last '$'. */
    if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
        p = strrchr(ciphertext, '$') + 1;

    for (i = 0; i < BINARY_SIZE; i++, p += 2)
        out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) |
                 atoi16[ARCH_INDEX(p[1])];

    return out;
}
/* Partial-hash accessors used by the cracker's hash tables: each returns
 * the low 4..27 bits of the first 32-bit word of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* Hash all queued candidate keys with Panama into crypt_out.
 * Note the #ifdef trick: with OpenMP the brace block is the body of the
 * parallel for; without OpenMP the for line disappears and the block runs
 * exactly once with index 0 — presumably safe because MAX_KEYS_PER_CRYPT
 * stays 1 in non-OMP builds (verify if that constant ever changes). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
sph_panama_context ctx;
sph_panama_init(&ctx);
sph_panama(&ctx, saved_key[index], strlen(saved_key[index]));
sph_panama_close(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
/* Return 1 if any computed hash matches the target binary.
 * Same #ifdef idiom as crypt_all: without OpenMP only index 0 is checked,
 * which matches the count of 1 produced by non-OMP builds. */
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
return 1;
return 0;
}
/* Exact comparison of one computed hash against the target binary. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* No further verification needed beyond the full binary compare above. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store candidate key number `index`, truncating to PLAINTEXT_LENGTH
 * bytes and always NUL-terminating the stored copy. */
static void panama_set_key(char *key, int index)
{
    size_t len = strlen(key);

    if (len > PLAINTEXT_LENGTH)
        len = PLAINTEXT_LENGTH;
    memcpy(saved_key[index], key, len);
    saved_key[index][len] = 0;
}
/* Return the stored (possibly truncated) candidate key for `index`. */
static char *get_key(int index)
{
return saved_key[index];
}
/* Canonicalize a bare 64-hex-digit hash by prefixing the "$panama$" tag;
 * anything else (already tagged or invalid) is passed through unchanged.
 * Returns a pointer into a static buffer when the tag is added. */
static char *prepare(char *fields[10], struct fmt_main *self) {
    static char buf[BINARY_SIZE*2+TAG_LENGTH+1];
    char *hash = fields[1];

    if (strlen(hash) != BINARY_SIZE*2 || !valid(hash, self))
        return hash;

    sprintf(buf, "%s%s", FORMAT_TAG, hash);
    return buf;
}
/* Format descriptor wiring the Panama hash into John the Ripper: static
 * parameters first, then the method table (most slots use the
 * fmt_default_* stubs since this format is unsalted). */
struct fmt_main fmt_panama_ = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
panama__tests
}, {
init,
fmt_default_done,
fmt_default_reset,
prepare,
valid,
fmt_default_split,
get_binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
panama_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
gsrb.flux.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// This version fissions the FV stencil into the 6 fluxes associate with each direction for each cell. In order to avoid
// redundant computation, each flux is calculated only once. However, in order to avoid writing all these fluxes to memory and
// then rereading them to complete the laplacian, the calculation of fluxes and summation in the laplacian are performed in a
// pipelined wavefront. To further enhance performance, the ij loops are fused (ghost zones are clobbered) and OpenMP simd
// pragmas are utilized. Finally, compiler specific hints and directives are utilized to facilitate simdization and nontemporal
// stores.
//------------------------------------------------------------------------------------------------------------------------------
#if (BLOCKCOPY_TILE_I != 10000)
#error operators.flux.c cannot block the unit stride dimension (BLOCKCOPY_TILE_I!=10000).
#endif
//------------------------------------------------------------------------------------------------------------------------------
void smooth(level_type * level, int x_id, int rhs_id, double a, double b){
// BLOCKCOPY_TILE_J+1 is required for flux_j, but BLOCKCOPY_TILE_J+2 seems to be faster (avoids prefetcher/coherency effects??)
if(level->fluxes==NULL){posix_memalign( (void**)&(level->fluxes), 512, (4)*(level->num_threads)*(BLOCKCOPY_TILE_J+2)*(level->box_jStride)*sizeof(double) );}
int s;for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps per GSRB smooth
// exchange the ghost zone...
if((s&1)==0){exchange_boundary(level, x_id,stencil_get_shape());apply_BCs(level, x_id,stencil_get_shape());}
else{exchange_boundary(level,VECTOR_TEMP,stencil_get_shape());apply_BCs(level,VECTOR_TEMP,stencil_get_shape());}
// apply the smoother...
double _timeStart = getTime();
double h2inv = 1.0/(level->h*level->h);
// loop over all block/tiles this process owns...
#pragma omp parallel if(level->num_my_blocks>1)
{
int block;
int threadID=omp_get_thread_num();
int num_threads=omp_get_num_threads();
#if 0 // [flux][thread][ij] layout
double * __restrict__ flux_i = level->fluxes + (0*level->num_threads + threadID)*(BLOCKCOPY_TILE_J+2)*(level->box_jStride);
double * __restrict__ flux_j = level->fluxes + (1*level->num_threads + threadID)*(BLOCKCOPY_TILE_J+2)*(level->box_jStride);
double * __restrict__ flux_k[2] = {level->fluxes + (2*level->num_threads + threadID)*(BLOCKCOPY_TILE_J+2)*(level->box_jStride),
level->fluxes + (3*level->num_threads + threadID)*(BLOCKCOPY_TILE_J+2)*(level->box_jStride)};
#else // [thread][flux][ij] layout
double * __restrict__ flux_i = level->fluxes + (4*threadID + 0)*(BLOCKCOPY_TILE_J+2)*(level->box_jStride);
double * __restrict__ flux_j = level->fluxes + (4*threadID + 1)*(BLOCKCOPY_TILE_J+2)*(level->box_jStride);
double * __restrict__ flux_k[2] = {level->fluxes + (4*threadID + 2)*(BLOCKCOPY_TILE_J+2)*(level->box_jStride),
level->fluxes + (4*threadID + 3)*(BLOCKCOPY_TILE_J+2)*(level->box_jStride)};
#endif
#if 1 // static, chunksize == 1... try to exploit constructive locality in the cache
int blockStart = threadID;
int blockEnd = level->num_my_blocks;
int blockStride = num_threads;
#else // static uniform
int blockStart = level->num_my_blocks*(threadID )/num_threads;
int blockEnd = level->num_my_blocks*(threadID+1)/num_threads;
int blockStride = 1;
#endif
for(block=blockStart;block<blockEnd;block+=blockStride){
const int box = level->my_blocks[block].read.box;
const int jlo = level->my_blocks[block].read.j;
const int klo = level->my_blocks[block].read.k;
const int jdim = level->my_blocks[block].dim.j;
const int kdim = level->my_blocks[block].dim.k;
const int ghosts = level->my_boxes[box].ghosts;
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ x_n;
double * __restrict__ x_np1;
if((s&1)==0){x_n = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
x_np1 = level->my_boxes[box].vectors[VECTOR_TEMP ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);}
else{x_n = level->my_boxes[box].vectors[VECTOR_TEMP ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
x_np1 = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);}
#ifdef __INTEL_COMPILER
// superfluous with OMP4 simd (?)
//__assume_aligned(x_n ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(x_np1 ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(rhs ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(alpha ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(beta_i ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(beta_j ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(beta_k ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(Dinv ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(flux_i ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(flux_j ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(flux_k[0],BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(flux_k[1],BOX_ALIGN_JSTRIDE*sizeof(double));
__assume( ( jStride) % BOX_ALIGN_JSTRIDE == 0); // e.g. jStride%4==0 or jStride%8==0, hence x+jStride is aligned
__assume( ( kStride) % BOX_ALIGN_JSTRIDE == 0);
__assume( jStride >= BOX_ALIGN_JSTRIDE);
__assume( kStride >= BOX_ALIGN_JSTRIDE);
__assume( jdim > 0);
__assume( kdim > 0);
#elif __xlC__
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), rhs );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), alpha );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_i );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_j );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_k );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), Dinv );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), x_n );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), x_np1 );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_i );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_j );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_k[0]);
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_k[1]);
#endif
int ij,k;
double * __restrict__ flux_klo = flux_k[0];
// startup / prolog... calculate flux_klo (bottom of cell)...
#if (_OPENMP>=201307)
#pragma omp simd aligned(beta_k,x_n,flux_klo:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
#ifdef __INTEL_COMPILER
#pragma loop_count min=BOX_ALIGN_JSTRIDE, avg=512
#endif
for(ij=0;ij<jdim*jStride;ij++){
flux_klo[ij] = beta_dxdk(x_n,ij); // k==0
}
// wavefront loop...
for(k=0;k<kdim;k++){
double * __restrict__ flux_klo = flux_k[(k )&0x1];
double * __restrict__ flux_khi = flux_k[(k+1)&0x1];
#if 1
// calculate flux_i and flux_j together
#if (_OPENMP>=201307)
#pragma omp simd aligned(beta_i,beta_j,x_n,flux_i,flux_j:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
#ifdef __INTEL_COMPILER
#pragma loop_count min=BOX_ALIGN_JSTRIDE, avg=512
#endif
for(ij=0;ij<(jdim+1)*jStride;ij++){
int ijk = ij + k*kStride;
flux_i[ij] = beta_dxdi(x_n,ijk);
flux_j[ij] = beta_dxdj(x_n,ijk);
}
#else
// calculate flux_i
#if (_OPENMP>=201307)
#pragma omp simd aligned(beta_i,x_n,flux_i:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
#ifdef __INTEL_COMPILER
#pragma loop_count min=BOX_ALIGN_JSTRIDE, avg=512
#endif
for(ij=0;ij<jdim*jStride;ij++){
int ijk = ij + k*kStride;
flux_i[ij] = beta_dxdi(x_n,ijk);
}
// calculate flux_j
#if (_OPENMP>=201307)
#pragma omp simd aligned(beta_j,x_n,flux_j:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
#ifdef __INTEL_COMPILER
#pragma loop_count min=BOX_ALIGN_JSTRIDE, avg=512
#endif
for(ij=0;ij<(jdim+1)*jStride;ij++){
int ijk = ij + k*kStride;
flux_j[ij] = beta_dxdj(x_n,ijk);
}
#endif
// calculate flux_khi (top of cell)
#if (_OPENMP>=201307)
#pragma omp simd aligned(beta_k,x_n,flux_khi:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
#ifdef __INTEL_COMPILER
#pragma loop_count min=BOX_ALIGN_JSTRIDE, avg=512
#endif
for(ij=0;ij<jdim*jStride;ij++){
int ijk = ij + k*kStride;
flux_khi[ij] = beta_dxdk(x_n,ijk+kStride); // k+1
}
const int color000 = (level->my_boxes[box].low.i^level->my_boxes[box].low.j^level->my_boxes[box].low.k^jlo^klo^s); // is element 000 of this *BLOCK* 000 red or black on this sweep
const double * __restrict__ RedBlack = level->RedBlack_FP + ghosts*(1+jStride) + jStride*((k^color000)&0x1); // Red/Black pencils... presumes ghost zones were corectly colored
#if (_OPENMP>=201307)
#pragma omp simd aligned(flux_i,flux_j,flux_klo,flux_khi,alpha,rhs,Dinv,x_n,x_np1,RedBlack:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
#ifdef __INTEL_COMPILER
#pragma loop_count min=BOX_ALIGN_JSTRIDE, avg=512
#pragma vector nontemporal // generally, we don't expect to reuse x_np1
#endif
for(ij=0;ij<jdim*jStride;ij++){
int ijk = ij + k*kStride;
double Lx = - flux_i[ ij] + flux_i[ ij+ 1]
- flux_j[ ij] + flux_j[ ij+jStride]
- flux_klo[ij] + flux_khi[ij ];
#ifdef USE_HELMHOLTZ
double Ax = a*alpha[ijk]*x_n[ijk] - b*Lx;
#else
double Ax = -b*Lx;
#endif
x_np1[ijk] = x_n[ijk] + RedBlack[ij]*Dinv[ijk]*(rhs[ijk]-Ax);
}
} // kdim
} // block
} // omp
level->timers.smooth += (double)(getTime()-_timeStart);
} // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
|
SoaDistanceTableABOMP.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_AB_OMP_H
#define QMCPLUSPLUS_DTDIMPL_AB_OMP_H
#include "OpenMP/OMPallocator.hpp"
#include "Platforms/PinnedAllocator.h"
#include "Particle/RealSpacePositionsOMP.h"
namespace qmcplusplus
{
/**@ingroup nnlist
 * @brief A derived class from DistanceTableData, specialized for AB using a transposed form.
 *
 * Distances and displacements from every target (B) particle to every source (A)
 * particle are computed with OpenMP target offload. Each target's row lives in a
 * single pinned host pool (r_dr_memorypool_) laid out as
 * N_targets x N_sources_padded x (D+1); r_dr_device_ptr_ caches the matching
 * device address of that pool.
 */
template<typename T, unsigned D, int SC>
class SoaDistanceTableABOMP : public DTD_BConds<T, D, SC>, public DistanceTableData
{
private:
  template<typename DT>
  using OffloadPinnedVector = Vector<DT, OMPallocator<DT, PinnedAlignedAllocator<DT>>>;

  ///accelerator output array for multiple walkers, N_targets x N_sources_padded x (D+1) (distances, displacements)
  OffloadPinnedVector<RealType> offload_output;
  ///accelerator input array for a list of target particle positions, N_targets x D
  OffloadPinnedVector<RealType> target_pos;
  ///accelerator input buffer for multiple data set
  OffloadPinnedVector<char> offload_input;
  ///accelerator output buffer for r and dr
  OffloadPinnedVector<RealType> r_dr_memorypool_;
  ///target particle id
  std::vector<int> particle_id;
  ///device pointer of r_dr_memorypool_
  RealType* r_dr_device_ptr_;
  /// timer for offload portion
  NewTimer& offload_timer_;
  /// timer for copy portion
  NewTimer& copy_timer_;
  /// timer for the whole evaluate call
  NewTimer& eval_timer_;

public:
  SoaDistanceTableABOMP(const ParticleSet& source, ParticleSet& target)
      : DTD_BConds<T, D, SC>(source.Lattice),
        DistanceTableData(source, target),
        r_dr_device_ptr_(nullptr),
        offload_timer_(*TimerManager.createTimer(std::string("SoaDistanceTableABOMP::offload_") + target.getName() + "_" + source.getName(), timer_level_fine)),
        copy_timer_(*TimerManager.createTimer(std::string("SoaDistanceTableABOMP::copy_") + target.getName() + "_" + source.getName(), timer_level_fine)),
        eval_timer_(*TimerManager.createTimer(std::string("SoaDistanceTableABOMP::evaluate_") + target.getName() + "_" + source.getName(), timer_level_fine))
  {
    // the source particle set must provide device-resident coordinates
    auto* coordinates_soa = dynamic_cast<const RealSpacePositionsOMP*>(&source.getCoordinates());
    if (!coordinates_soa)
      throw std::runtime_error("Source particle set doesn't have OpenMP offload. Contact developers!");
    resize(source.getTotalNum(), target.getTotalNum());
#pragma omp target enter data map(to:this[:1])
  }

  /** (re)allocate the r/dr pool and attach the per-target distance/displacement views
   * @param ns number of source particles
   * @param nt number of target particles
   */
  void resize(int ns, int nt)
  {
    N_sources = ns;
    N_targets = nt;
    if (N_sources * N_targets == 0)
      return;

    // initialize memory containers and views
    const int N_sources_padded = getAlignedSize<T>(N_sources);
    const int stride_size = N_sources_padded * (D + 1);
    r_dr_memorypool_.resize(stride_size * N_targets);
    // capture the device address of the pinned pool for later offload kernels
    auto* pool_ptr = r_dr_memorypool_.data();
#pragma omp target data use_device_ptr(pool_ptr)
    {
      r_dr_device_ptr_ = pool_ptr;
    }

    distances_.resize(N_targets);
    displacements_.resize(N_targets);
    for (int i = 0; i < N_targets; ++i)
    {
      distances_[i].attachReference(r_dr_memorypool_.data() + i * stride_size, N_sources);
      displacements_[i].attachReference(N_sources, N_sources_padded, r_dr_memorypool_.data() + i * stride_size + N_sources_padded);
    }

    // The padding of temp_r_ and temp_dr_ is necessary for the memory copy in the update function
    // temp_r_ is padded explicitly while temp_dr_ is padded internally
    temp_r_.resize(N_sources_padded);
    temp_dr_.resize(N_sources);
  }

  SoaDistanceTableABOMP() = delete;
  SoaDistanceTableABOMP(const SoaDistanceTableABOMP&) = delete;

  ~SoaDistanceTableABOMP()
  {
#pragma omp target exit data map(delete:this[:1])
  }

  /** evaluate the full table */
  inline void evaluate(ParticleSet& P)
  {
    ScopedTimer eval(&eval_timer_);

    // be aware of the sign of Displacement
    const int N_targets_local = N_targets;
    const int N_sources_local = N_sources;
    const int N_sources_padded = getAlignedSize<T>(N_sources);

    // pack target positions into a flat N_targets x D host buffer
    target_pos.resize(N_targets * D);
    for (size_t iat = 0; iat < N_targets; iat++)
      for (size_t idim = 0; idim < D; idim++)
        target_pos[iat * D + idim] = P.R[iat][idim];

    auto* target_pos_ptr = target_pos.data();
    auto* source_pos_ptr = Origin->getCoordinates().getAllParticlePos().data();
    auto* r_dr_ptr = r_dr_memorypool_.data();

    // To maximize thread usage, the loop over electrons is chunked. Each chunk is sent to an OpenMP offload thread team.
    const int ChunkSizePerTeam = 256;
    const int num_teams = (N_sources + ChunkSizePerTeam - 1) / ChunkSizePerTeam;

    {
      ScopedTimer offload(&offload_timer_);
#pragma omp target teams distribute collapse(2) num_teams(N_targets*num_teams) \
                map(to: source_pos_ptr[:N_sources_padded*D]) \
                map(always, to: target_pos_ptr[:N_targets*D]) \
                map(always, from: r_dr_ptr[:r_dr_memorypool_.size()])
      for (int iat = 0; iat < N_targets_local; ++iat)
        for (int team_id = 0; team_id < num_teams; team_id++)
        {
          const int first = ChunkSizePerTeam * team_id;
          const int last = (first + ChunkSizePerTeam) > N_sources_local ? N_sources_local : first + ChunkSizePerTeam;

          T pos[D];
          for (int idim = 0; idim < D; idim++)
            pos[idim] = target_pos_ptr[iat * D + idim];

          const size_t stride_size = N_sources_padded * (D + 1);
          auto* r_iat_ptr = r_dr_ptr + iat * stride_size;
          auto* dr_iat_ptr = r_iat_ptr + N_sources_padded;

          DTD_BConds<T, D, SC>::computeDistancesOffload(pos, source_pos_ptr, r_iat_ptr, dr_iat_ptr, N_sources_padded,
                                                        first, last);
        }
    }
  }

  /** It has two implementations mw_evaluate_transfer_inplace and mw_evaluate_fuse_transfer with different D2H memory transfer schemes.
   * Eventually, there will be only one version without any transfer and solve the dilemma.
   */
  inline void mw_evaluate(const RefVector<DistanceTableData>& dt_list, const RefVector<ParticleSet>& p_list)
  {
    ScopedTimer eval(&eval_timer_);
    mw_evaluate_fuse_transfer(dt_list, p_list);
  }

  /** this function implements mw_evaluate.
   * After offloading the computation of distances and displacements, the per-walker result is transferred back walker by walker in place.
   * The runtime overhead is very high for small problem size with many walkers.
   */
  inline void mw_evaluate_transfer_inplace(const RefVector<DistanceTableData>& dt_list, const RefVector<ParticleSet>& p_list)
  {
    const size_t nw = dt_list.size();
    size_t count_targets = 0;
    for (ParticleSet& p: p_list)
      count_targets += p.getTotalNum();
    const size_t total_targets = count_targets;

    // This is horrible optimization putting different data types in a single buffer but allows a single H2D transfer
    constexpr size_t realtype_size = sizeof(RealType);
    constexpr size_t int_size = sizeof(int);
    constexpr size_t ptr_size = sizeof(RealType*);
    // layout: [positions | walker ids | per-walker source ptrs | per-target output ptrs]
    offload_input.resize(total_targets * D * realtype_size + total_targets * int_size + (nw + total_targets) * ptr_size);
    auto target_positions = reinterpret_cast<RealType*>(offload_input.data());
    auto walker_id_ptr = reinterpret_cast<int*>(offload_input.data() + total_targets * D * realtype_size);
    auto source_ptrs = reinterpret_cast<RealType**>(offload_input.data() + total_targets * D * realtype_size + total_targets * int_size);
    auto output_ptrs = reinterpret_cast<RealType**>(offload_input.data() + total_targets * D * realtype_size + total_targets * int_size + nw * ptr_size);

    const int N_sources_padded = getAlignedSize<T>(N_sources);
    offload_output.resize(total_targets * N_sources_padded * (D + 1));

    count_targets = 0;
    for (size_t iw = 0; iw < nw; iw++)
    {
      auto& dt = static_cast<SoaDistanceTableABOMP&>(dt_list[iw].get());
      ParticleSet& pset(p_list[iw]);
      assert(N_sources == dt.N_sources);

      auto& RSoA_OMP = static_cast<const RealSpacePositionsOMP&>(dt.Origin->getCoordinates());
      source_ptrs[iw] = const_cast<RealType*>(RSoA_OMP.getDevicePtr());
      for (size_t iat = 0; iat < pset.getTotalNum(); ++iat, ++count_targets)
      {
        for (size_t idim = 0; idim < D; idim++)
          target_positions[count_targets * D + idim] = pset.R[iat][idim];
        walker_id_ptr[count_targets] = iw;
        // device-side destination: this walker's row in its own pinned pool
        output_ptrs[count_targets] = dt.r_dr_device_ptr_ + iat * N_sources_padded * (D + 1);
      }
    }

    const int N_sources_local = N_sources;
    // To maximize thread usage, the loop over electrons is chunked. Each chunk is sent to an OpenMP offload thread team.
    const int ChunkSizePerTeam = 256;
    const int num_teams = (N_sources + ChunkSizePerTeam - 1) / ChunkSizePerTeam;
    auto* input_ptr = offload_input.data();

    {
      ScopedTimer offload(&offload_timer_);
#pragma omp target teams distribute collapse(2) num_teams(total_targets*num_teams) \
                map(always, to: input_ptr[:offload_input.size()]) \
                nowait depend(out: total_targets)
      for (int iat = 0; iat < total_targets; ++iat)
        for (int team_id = 0; team_id < num_teams; team_id++)
        {
          auto* target_pos_ptr = reinterpret_cast<RealType*>(input_ptr);
          const int walker_id = reinterpret_cast<int*>(input_ptr + total_targets * D * realtype_size)[iat];
          auto* source_pos_ptr = reinterpret_cast<RealType**>(input_ptr + total_targets * D * realtype_size + total_targets * int_size)[walker_id];
          auto* r_iat_ptr = reinterpret_cast<RealType**>(input_ptr + total_targets * D * realtype_size + total_targets * int_size + nw * ptr_size)[iat];
          auto* dr_iat_ptr = r_iat_ptr + N_sources_padded;

          const int first = ChunkSizePerTeam * team_id;
          const int last = (first + ChunkSizePerTeam) > N_sources_local ? N_sources_local : first + ChunkSizePerTeam;

          T pos[D];
          for (int idim = 0; idim < D; idim++)
            pos[idim] = target_pos_ptr[iat * D + idim];

          DTD_BConds<T, D, SC>::computeDistancesOffload(pos, source_pos_ptr, r_iat_ptr, dr_iat_ptr, N_sources_padded,
                                                        first, last);
        }
    }

    {
      // FIX: was "ScopedTimer copy(©_timer_);" — mangled "&copy" HTML entity
      ScopedTimer copy(&copy_timer_);
      for (size_t iw = 0; iw < nw; iw++)
      {
        auto& dt = static_cast<SoaDistanceTableABOMP&>(dt_list[iw].get());
        auto* pool_ptr = dt.r_dr_memorypool_.data();
        // D2H transfer per walker, ordered after the compute task above
#pragma omp target update from(pool_ptr[:dt.r_dr_memorypool_.size()]) nowait depend(inout:total_targets)
      }
#pragma omp taskwait
    }
  }

  /** this function implements mw_evaluate.
   * After offloading the computation of distances and displacements, the result for all the walkers is transferred back together in one shot
   * and then copied to per-walker data structure. Memory copy on the CPU is still costly and not beneficial for large problem size with a few walkers.
   */
  inline void mw_evaluate_fuse_transfer(const RefVector<DistanceTableData>& dt_list, const RefVector<ParticleSet>& p_list)
  {
    const size_t nw = dt_list.size();
    size_t count_targets = 0;
    for (ParticleSet& p: p_list)
      count_targets += p.getTotalNum();
    const size_t total_targets = count_targets;

    // This is horrible optimization putting different data types in a single buffer but allows a single H2D transfer
    const size_t realtype_size = sizeof(RealType);
    const size_t int_size = sizeof(int);
    const size_t ptr_size = sizeof(RealType*);
    // layout: [positions | walker ids | per-walker source ptrs]
    offload_input.resize(total_targets * D * realtype_size + total_targets * int_size + nw * ptr_size);
    auto target_positions = reinterpret_cast<RealType*>(offload_input.data());
    auto walker_id_ptr = reinterpret_cast<int*>(offload_input.data() + total_targets * D * realtype_size);
    auto source_ptrs = reinterpret_cast<RealType**>(offload_input.data() + total_targets * D * realtype_size + total_targets * int_size);
    particle_id.resize(total_targets);

    const int N_sources_padded = getAlignedSize<T>(N_sources);
    offload_output.resize(total_targets * N_sources_padded * (D + 1));

    count_targets = 0;
    for (size_t iw = 0; iw < nw; iw++)
    {
      auto& dt = static_cast<SoaDistanceTableABOMP&>(dt_list[iw].get());
      ParticleSet& pset(p_list[iw]);
      assert(N_sources == dt.N_sources);

      auto& RSoA_OMP = static_cast<const RealSpacePositionsOMP&>(dt.Origin->getCoordinates());
      source_ptrs[iw] = const_cast<RealType*>(RSoA_OMP.getDevicePtr());
      for (size_t iat = 0; iat < pset.getTotalNum(); ++iat, ++count_targets)
      {
        for (size_t idim = 0; idim < D; idim++)
          target_positions[count_targets * D + idim] = pset.R[iat][idim];
        walker_id_ptr[count_targets] = iw;
        particle_id[count_targets] = iat;
      }
    }

    const int N_sources_local = N_sources;
    // To maximize thread usage, the loop over electrons is chunked. Each chunk is sent to an OpenMP offload thread team.
    const int ChunkSizePerTeam = 256;
    const int num_teams = (N_sources + ChunkSizePerTeam - 1) / ChunkSizePerTeam;
    auto* r_dr_ptr = offload_output.data();
    auto* input_ptr = offload_input.data();

    {
      ScopedTimer offload(&offload_timer_);
#pragma omp target teams distribute collapse(2) num_teams(total_targets*num_teams) \
                map(always, to: input_ptr[:offload_input.size()]) \
                map(always, from: r_dr_ptr[:offload_output.size()])
      for (int iat = 0; iat < total_targets; ++iat)
        for (int team_id = 0; team_id < num_teams; team_id++)
        {
          auto* target_pos_ptr = reinterpret_cast<RealType*>(input_ptr);
          const int walker_id = reinterpret_cast<int*>(input_ptr + total_targets * D * realtype_size)[iat];
          auto* source_pos_ptr = reinterpret_cast<RealType**>(input_ptr + total_targets * D * realtype_size + total_targets * int_size)[walker_id];
          auto* r_iat_ptr = r_dr_ptr + iat * N_sources_padded * (D + 1);
          auto* dr_iat_ptr = r_dr_ptr + iat * N_sources_padded * (D + 1) + N_sources_padded;

          const int first = ChunkSizePerTeam * team_id;
          const int last = (first + ChunkSizePerTeam) > N_sources_local ? N_sources_local : first + ChunkSizePerTeam;

          T pos[D];
          for (int idim = 0; idim < D; idim++)
            pos[idim] = target_pos_ptr[iat * D + idim];

          DTD_BConds<T, D, SC>::computeDistancesOffload(pos, source_pos_ptr, r_iat_ptr, dr_iat_ptr, N_sources_padded,
                                                        first, last);
        }
    }

    {
      // FIX: was "ScopedTimer copy(©_timer_);" — mangled "&copy" HTML entity
      ScopedTimer copy(&copy_timer_);
      // scatter the fused output back into each walker's own table
      for (size_t iat = 0; iat < total_targets; iat++)
      {
        const int wid = walker_id_ptr[iat];
        const int pid = particle_id[iat];
        auto& dt = static_cast<SoaDistanceTableABOMP&>(dt_list[wid].get());
        assert(N_sources_padded == dt.displacements_[pid].capacity());
        auto offset = offload_output.data() + iat * N_sources_padded * (D + 1);
        std::copy_n(offset, N_sources_padded, dt.distances_[pid].data());
        std::copy_n(offset + N_sources_padded, N_sources_padded * D, dt.displacements_[pid].data());
      }
    }
  }

  ///evaluate the temporary pair relations
  inline void move(const ParticleSet& P, const PosType& rnew, const IndexType iat, bool prepare_old)
  {
    DTD_BConds<T, D, SC>::computeDistances(rnew, Origin->getCoordinates().getAllParticlePos(), temp_r_.data(), temp_dr_, 0,
                                           N_sources);
    // If the full table is not ready all the time, overwrite the current value.
    // If this step is missing, DT values can be undefined in case a move is rejected.
    if (!need_full_table_)
      DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(), distances_[iat].data(),
                                             displacements_[iat], 0, N_sources);
  }

  ///update the stripe for jat-th particle
  inline void update(IndexType iat, bool partial_update)
  {
    std::copy_n(temp_r_.data(), N_sources, distances_[iat].data());
    for (int idim = 0; idim < D; ++idim)
      std::copy_n(temp_dr_.data(idim), N_sources, displacements_[iat].data(idim));
  }

  /** compact list of sources within rcut of target iat; returns the count.
   * Displacements are negated to restore the AB sign convention.
   */
  size_t get_neighbors(int iat,
                       RealType rcut,
                       int* restrict jid,
                       RealType* restrict dist,
                       PosType* restrict displ) const
  {
    constexpr T cminus(-1);
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        jid[nn] = jat;
        dist[nn] = rij;
        displ[nn] = cminus * displacements_[jat][iat];
        nn++;
      }
    }
    return nn;
  }

  /** find the closest source to target iat; returns its index or -1.
   * @param newpos when true, search the temporary (proposed-move) data instead
   */
  int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const
  {
    RealType min_dist = std::numeric_limits<RealType>::max();
    int index = -1;
    if (newpos)
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (temp_r_[jat] < min_dist)
        {
          min_dist = temp_r_[jat];
          index = jat;
        }
      if (index >= 0)
      {
        r = min_dist;
        dr = temp_dr_[index];
      }
    }
    else
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (distances_[iat][jat] < min_dist)
        {
          min_dist = distances_[iat][jat];
          index = jat;
        }
      if (index >= 0)
      {
        r = min_dist;
        dr = displacements_[iat][index];
      }
    }
    return index;
  }

  /// distances-only variant of get_neighbors; returns the number collected
  size_t get_neighbors(int iat, RealType rcut, RealType* restrict dist) const
  {
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        dist[nn] = rij;
        nn++;
      }
    }
    return nn;
  }
};
} // namespace qmcplusplus
#endif
|
rawMD5_fmt_plug.c | /*
* Raw-MD5 (thick) based on Raw-MD4 w/ mmx/sse/intrinsics
* This software is Copyright (c) 2011 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* OMP added May 2013, JimF
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawMD5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawMD5);
#else
#include <string.h>
#include "arch.h"
#include "md5.h"
#include "misc.h" // error()
#include "common.h"
#include "johnswap.h"
#include "formats.h"
#include "base64_convert.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
//#undef SIMD_COEF_32
//#undef SIMD_PARA_MD5
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 256 // core i7
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-MD5"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifndef MD5_BUF_SIZ
#define MD5_BUF_SIZ 16
#endif
#define CIPHERTEXT_LENGTH 32
#define DIGEST_SIZE 16
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define FORMAT_TAG "$dynamic_0$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define FORMAT_TAG2 "{MD5}"
#define FORMAT_TAG2_LEN (sizeof(FORMAT_TAG2) - 1)
/* Self-test vectors: bare 32-hex hashes, "$dynamic_0$"-tagged hashes, and
 * one "{MD5}" base64 form that prepare() converts to hex. */
static struct fmt_tests tests[] = {
	{"5a105e8b9d40e1329780d62ea2265d8a", "test1"},
	{FORMAT_TAG "5a105e8b9d40e1329780d62ea2265d8a", "test1"},
	{"098f6bcd4621d373cade4e832627b4f6", "test"},
	{FORMAT_TAG "378e2c4a07968da2eca692320136433d", "thatsworking"},
	{FORMAT_TAG "8ad8757baa8564dc136c1e07507f4a98", "test3"},
	{"d41d8cd98f00b204e9800998ecf8427e", ""},
#ifdef DEBUG
	{FORMAT_TAG "c9ccf168914a1bcfc3229f1948e67da0","1234567890123456789012345678901234567890123456789012345"},
#if PLAINTEXT_LENGTH >= 80
	{FORMAT_TAG "57edf4a22be3c955ac49da2e2107b67a","12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
#endif
#endif
	{"{MD5}CY9rzUYh03PK3k6DJie09g==", "test"},
	{NULL}
};
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 55
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*4*SIMD_COEF_32 )
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef SIMD_COEF_32
static ARCH_WORD_32 (*saved_key)[MD5_BUF_SIZ*NBKEYS];
static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[4];
#endif
/* Allocate the key/length/digest arrays. Candidate counts are scaled by the
 * OpenMP thread count (times OMP_SCALE) so every thread has work per call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#else
	self->params.max_keys_per_crypt *= 10;
#endif
#ifndef SIMD_COEF_32
	/* Scalar path: one plaintext buffer and one 16-byte digest per candidate. */
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
#else
	/* SIMD path: interleaved, SIMD-aligned buffers, NBKEYS lanes per slot. */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*crypt_key), MEM_ALIGN_SIMD);
#endif
}
/* Release everything allocated in init(). saved_len exists only on the
 * scalar (non-SIMD) path. */
static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
#ifndef SIMD_COEF_32
	MEM_FREE(saved_len);
#endif
}
/* Convert {MD5}CY9rzUYh03PK3k6DJie09g== to 098f6bcd4621d373cade4e832627b4f6 */
/* Recognizes the LDAP-style "{MD5}" prefix followed by exactly 24 base64
 * characters and rewrites it as lowercase hex; anything else is passed
 * through unchanged for valid() to judge. */
static char *prepare(char *fields[10], struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	if (!strncmp(fields[1], FORMAT_TAG2, FORMAT_TAG2_LEN) && strlen(fields[1]) == FORMAT_TAG2_LEN+24) {
		int res;
		res = base64_convert(&fields[1][FORMAT_TAG2_LEN], e_b64_mime, 24,
		                     out, e_b64_hex, sizeof(out),
		                     flg_Base64_HEX_LOCASE, 0);
		/* negative result means the base64 payload was malformed */
		if (res >= 0)
			return out;
	}
	return fields[1];
}
/* A hash is valid when, after an optional "$dynamic_0$" tag, it consists of
 * exactly CIPHERTEXT_LENGTH (32) hex digits and nothing else. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (*p == '$' && !strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;

	q = p;
	/* atoi16l yields 0x7F for any character that is not a hex digit;
	   presumably lowercase-only — uppercase input would be rejected here */
	while (atoi16l[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q && q - p == CIPHERTEXT_LENGTH;
}
/* Canonicalize a hash: guarantee the "$dynamic_0$" tag is present.
 * An already-tagged hash is returned as-is; otherwise the 32 hex digits are
 * appended to a statically tagged buffer (static init zero-fills the tail,
 * so the result stays NUL-terminated). */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG;
	int tagged = (ciphertext[0] == '$') &&
	             (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) == 0);

	if (tagged)
		return ciphertext;

	memcpy(&out[TAG_LENGTH], ciphertext, CIPHERTEXT_LENGTH);
	return out;
}
/* Decode the 32 hex digits into four 32-bit words. The shift pattern
 * (4,0,12,8,20,16,28,24) places each hex-pair byte in little-endian order
 * within the word; big-endian hosts then byte-swap. With SIMD and
 * REVERSE_STEPS, the words are additionally "reversed" to match the
 * partially-unrolled digest produced by crypt_all. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned long dummy;
		unsigned int i[DIGEST_SIZE/sizeof(unsigned int)];
	} _out;
	unsigned int *out = _out.i;
	unsigned int i;
	unsigned int temp;

	ciphertext += TAG_LENGTH;
	for (i=0; i<4; i++)
	{
		temp  = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])]));

		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8;

		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16;

		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24;

#if ARCH_LITTLE_ENDIAN
		out[i]=temp;
#else
		out[i]=JOHNSWAP(temp);
#endif
	}
#if SIMD_COEF_32 && defined(REVERSE_STEPS)
	md5_reverse(out);
#endif
	return out;
}
/* Inverse of get_binary(): rebuild the canonical tagged hex string from the
 * stored binary, undoing the SIMD step-reversal and any endianness fixup.
 * The (j ^ 1) index swaps nibbles so hex pairs come out in byte order. */
static char *source(char *source, void *binary)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG;
	ARCH_WORD_32 b[4];
	char *p;
	int i, j;

	memcpy(b, binary, sizeof(b));

#if SIMD_COEF_32 && defined(REVERSE_STEPS)
	md5_unreverse(b);
#endif

#if ARCH_LITTLE_ENDIAN==0
	alter_endianity(b, 16);
#endif

	p = &out[TAG_LENGTH];
	for (i = 0; i < 4; i++)
		for (j = 0; j < 8; j++)
			*p++ = itoa16[(b[i] >> ((j ^ 1) * 4)) & 0xf];

	return out;
}
#ifdef SIMD_COEF_32
/* SIMD path: copy a candidate into the interleaved key buffer, scanning one
 * 32-bit word at a time. The MD5 0x80 padding byte is placed right after the
 * last plaintext byte, the remainder of the lane is zeroed ("key cleaning"),
 * and the bit length is stored in word 14 as the MD5 block format requires. */
static void set_key(char *_key, int index)
{
#if ARCH_ALLOWS_UNALIGNED
	const ARCH_WORD_32 *key = (ARCH_WORD_32*)_key;
#else
	/* copy to an aligned bounce buffer when unaligned word reads are unsafe */
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
	const ARCH_WORD_32 *key = (uint32_t*)(is_aligned(_key, sizeof(uint32_t)) ?
	                                      _key : strcpy(buf_aligned, _key));
#endif
	/* lane base: lane (index % SIMD_COEF_32) within slot (index / SIMD_COEF_32) */
	ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32*)saved_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*SIMD_COEF_32];
	ARCH_WORD_32 *keybuf_word = keybuffer;
	unsigned int len;
	ARCH_WORD_32 temp;

	len = 0;
	while((temp = *key++) & 0xff) {
		if (!(temp & 0xff00))
		{
			/* NUL in byte 1: keep 1 byte, append 0x80 padding */
			*keybuf_word = (temp & 0xff) | (0x80 << 8);
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000))
		{
			/* NUL in byte 2: keep 2 bytes */
			*keybuf_word = (temp & 0xffff) | (0x80 << 16);
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000))
		{
			/* NUL in byte 3: keep 3 bytes */
			*keybuf_word = temp | (0x80U << 24);
			len+=3;
			goto key_cleaning;
		}
		/* full word of plaintext: store and advance one interleaved stride */
		*keybuf_word = temp;
		len += 4;
		keybuf_word += SIMD_COEF_32;
	}
	/* key length was a multiple of 4: padding byte goes in its own word */
	*keybuf_word = 0x80;

#ifdef DEBUG
	/* This function is higly optimized and assumes that we are
	   never ever given a key longer than fmt_params.plaintext_length.
	   If we are, buffer overflows WILL happen */
	if (len > PLAINTEXT_LENGTH) {
		fprintf(stderr, "\n** Core bug: got len %u\n'%s'\n", len, _key);
		error();
	}
#endif

key_cleaning:
	/* zero the rest of the lane up to the previous (longer) key's end */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* MD5 message length in bits, in block word 14 */
	keybuffer[14*SIMD_COEF_32] = len << 3;
}
#else
/* Scalar path: remember length and raw bytes (terminated later in get_key). */
static void set_key(char *key, int index)
{
	int len = strlen(key);
	saved_len[index] = len;
	memcpy(saved_key[index], key, len);
}
#endif
#ifdef SIMD_COEF_32
/* SIMD path: rebuild the plaintext from the interleaved buffer; the length
 * is recovered from the bit count stored in block word 14 (>> 3). */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	unsigned int i;
	ARCH_WORD_32 len = ((ARCH_WORD_32*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*SIMD_COEF_32] >> 3;

	for(i=0;i<len;i++)
		out[i] = ((char*)saved_key)[GETPOS(i, index)];
	out[i] = 0;
	return (char*)out;
}
#else
/* Scalar path: NUL-terminate the stored key in place and hand it back. */
static char *get_key(int index)
{
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
}
#endif
#ifndef REVERSE_STEPS
/* benchmarking aid: neutralize the reversed-steps flag when disabled */
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif

/* Hash all buffered candidates. Work is split into MAX_KEYS_PER_CRYPT-sized
 * slots (one SIMD body call per slot, or one scalar MD5 per key) and the
 * slots are distributed across OpenMP threads. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < loops; index++) {
#if SIMD_COEF_32
		SIMDmd5body(saved_key[index], crypt_key[index], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);
#else
		MD5_CTX ctx;

		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], saved_len[index]);
		MD5_Final((unsigned char *)crypt_key[index], &ctx);
#endif
	}
	return count;
}
/* Quick scan: does ANY computed digest match the binary? Only the first
 * 32-bit word is compared here; cmp_one/cmp_exact confirm full matches. */
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
	unsigned int x, y;
#if 1
	/* scan only the lanes actually filled for this call */
	const unsigned int c = (count + SIMD_COEF_32 - 1) / SIMD_COEF_32;
#else
	const unsigned int c = SIMD_PARA_MD5;
#endif
	for(y = 0; y < c; y++)
		for(x = 0; x < SIMD_COEF_32; x++)
		{
			if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+x] )
				return 1;
		}
	return 0;
#else
	unsigned int index = 0;

#if 1
	for (index = 0; index < count; index++)
#endif
		/* scalar path compares the full 16-byte digest directly */
		if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
			return 1;
	return 0;
#endif
}
/* Check a single candidate against the binary. SIMD compares only word 'a'
 * of the interleaved lane (cmp_exact finishes the job); scalar compares the
 * whole digest. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int x = index&(SIMD_COEF_32-1);
	unsigned int y = (unsigned int)index/SIMD_COEF_32;

	return ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4];
#else
	return !memcmp(binary, crypt_key[index], DIGEST_SIZE);
#endif
}
/* Full verification. On SIMD the digest is recomputed with plain (scalar)
 * MD5 — reversed to match the stored form — because the fast path only
 * compared one word. The scalar path already compared the full digest in
 * cmp_one, so it can simply return 1 here. */
static int cmp_exact(char *source, int index)
{
#ifdef SIMD_COEF_32
	ARCH_WORD_32 crypt_key[DIGEST_SIZE / 4];
	MD5_CTX ctx;
	char *key = get_key(index);

	MD5_Init(&ctx);
	MD5_Update(&ctx, key, strlen(key));
	MD5_Final((void*)crypt_key, &ctx);

#ifdef REVERSE_STEPS
	md5_reverse(crypt_key);
#endif
	return !memcmp(get_binary(source), crypt_key, DIGEST_SIZE);
#else
	return 1;
#endif
}
#ifdef SIMD_COEF_32
/* Hash-table bucket functions: mask increasing numbers of low bits from the
 * first digest word. SIMD_INDEX locates that word in the interleaved layout. */
#define SIMD_INDEX (index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_6; }
#else
/* Scalar layout: first digest word is simply crypt_key[index][0]. */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif
/* Format descriptor registered with the John the Ripper core. The first
 * brace group is the static parameter block; the second is the method
 * table. Entry order is fixed by struct fmt_main — do not reorder. */
struct fmt_main fmt_rawMD5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,                       /* presumably the tunable-cost count — TODO confirm against fmt_main */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |  /* OpenMP-capable, but scales poorly */
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },                /* no tunable-cost names */
		{ FORMAT_TAG, FORMAT_TAG2 },
		tests
	}, {
		/* Lifecycle and hash-parsing methods. */
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,        /* saltless format: default (no-op) salt handling */
		{ NULL },
		source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		/* Cracking loop methods: set keys, crypt, then compare. */
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
threading.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifndef LIGHTGBM_UTILS_THREADING_H_
#define LIGHTGBM_UTILS_THREADING_H_
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <algorithm>
#include <functional>
#include <vector>
namespace LightGBM {
// Static helpers for splitting an index range [0, cnt) into per-thread
// blocks and running a callback over each block with OpenMP. The OMP_*
// macros (OMP_NUM_THREADS, OMP_INIT_EX, ...) come from openmp_wrapper.h.
class Threading {
 public:
  // Convenience overload: uses the current OpenMP thread count.
  // On return *out_nblock is the number of blocks and *block_size the
  // (aligned) number of items per block.
  template <typename INDEX_T>
  static inline void BlockInfo(INDEX_T cnt, INDEX_T min_cnt_per_block,
                               int* out_nblock, INDEX_T* block_size) {
    int num_threads = OMP_NUM_THREADS();
    BlockInfo<INDEX_T>(num_threads, cnt, min_cnt_per_block, out_nblock,
                       block_size);
  }
  // Chooses nblock = min(num_threads, ceil(cnt / min_cnt_per_block)) so that
  // no block is smaller than min_cnt_per_block, then sizes each block as
  // ceil(cnt / nblock), rounded up by SIZE_ALIGNED (alignment macro defined
  // elsewhere — presumably for cache/SIMD friendliness).
  template <typename INDEX_T>
  static inline void BlockInfo(int num_threads, INDEX_T cnt,
                               INDEX_T min_cnt_per_block, int* out_nblock,
                               INDEX_T* block_size) {
    *out_nblock = std::min<int>(
        num_threads,
        static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block));
    if (*out_nblock > 1) {
      *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock));
    } else {
      *block_size = cnt;
    }
  }
  // Like BlockInfo, but rounds the block size up to a multiple of
  // min_cnt_per_block instead of SIZE_ALIGNED.
  template <typename INDEX_T>
  static inline void BlockInfoForceSize(int num_threads, INDEX_T cnt,
                                        INDEX_T min_cnt_per_block,
                                        int* out_nblock, INDEX_T* block_size) {
    *out_nblock = std::min<int>(
        num_threads,
        static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block));
    if (*out_nblock > 1) {
      *block_size = (cnt + (*out_nblock) - 1) / (*out_nblock);
      // force the block size to the times of min_cnt_per_block
      *block_size = (*block_size + min_cnt_per_block - 1) / min_cnt_per_block *
                    min_cnt_per_block;
    } else {
      *block_size = cnt;
    }
  }
  // Convenience overload of BlockInfoForceSize using the current thread count.
  template <typename INDEX_T>
  static inline void BlockInfoForceSize(INDEX_T cnt, INDEX_T min_cnt_per_block,
                                        int* out_nblock, INDEX_T* block_size) {
    int num_threads = OMP_NUM_THREADS();
    BlockInfoForceSize<INDEX_T>(num_threads, cnt, min_cnt_per_block, out_nblock,
                                block_size);
  }
  // Runs inner_fun(block_index, block_start, block_end) over [start, end) in
  // parallel, one block per loop iteration. Exceptions thrown inside the
  // parallel region are captured by the OMP_*_EX macros and rethrown after
  // the loop. Returns the number of blocks used.
  template <typename INDEX_T>
  static inline int For(
      INDEX_T start, INDEX_T end, INDEX_T min_block_size,
      const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) {
    int n_block = 1;
    INDEX_T num_inner = end - start;
    BlockInfo<INDEX_T>(end - start, min_block_size, &n_block, &num_inner);
    OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < n_block; ++i) {
      OMP_LOOP_EX_BEGIN();
      INDEX_T inner_start = start + num_inner * i;
      // The last block may be short; clamp to end.
      INDEX_T inner_end = std::min(end, inner_start + num_inner);
      inner_fun(i, inner_start, inner_end);
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    return n_block;
  }
  // Parallel map-reduce: each block writes its two partial results into
  // per-block slots, which are summed sequentially afterwards into
  // *res1 / *res2. Returns the number of blocks used.
  template <typename INDEX_T, typename VAL1_T, typename VAL2_T>
  static inline int SumReduction(
      INDEX_T start, INDEX_T end, INDEX_T min_block_size,
      const std::function<void(int, INDEX_T, INDEX_T, VAL1_T* res1,
                               VAL2_T* res2)>& inner_fun,
      VAL1_T* res1, VAL2_T* res2) {
    int n_block = 1;
    INDEX_T num_inner = end - start;
    BlockInfoForceSize<INDEX_T>(end - start, min_block_size, &n_block,
                                &num_inner);
    std::vector<VAL1_T> val_1s(n_block, static_cast<VAL1_T>(0));
    std::vector<VAL2_T> val_2s(n_block, static_cast<VAL2_T>(0));
    OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < n_block; ++i) {
      OMP_LOOP_EX_BEGIN();
      INDEX_T inner_start = start + num_inner * i;
      INDEX_T inner_end = std::min(end, inner_start + num_inner);
      inner_fun(i, inner_start, inner_end, &val_1s[i], &val_2s[i]);
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    *res1 = 0;
    *res2 = 0;
    for (int i = 0; i < n_block; ++i) {
      *res1 += val_1s[i];
      *res2 += val_2s[i];
    }
    return n_block;
  }
};
// Parallel stable partition of [0, cnt) indices. Each thread partitions its
// own chunk via a user callback into "left" / "right" groups, then the
// per-chunk results are compacted into `out` (all left items first, then all
// right items). With TWO_BUFFER the callback writes the two groups into
// separate scratch buffers; otherwise it writes both into `left_` and the
// right part is stored reversed (and copied back in chunk order afterwards).
template <typename INDEX_T, bool TWO_BUFFER>
class ParallelPartitionRunner {
 public:
  // Pre-allocates scratch buffers for `num_data` items split across the
  // current OpenMP thread count.
  ParallelPartitionRunner(INDEX_T num_data, INDEX_T min_block_size)
      : min_block_size_(min_block_size) {
    num_threads_ = OMP_NUM_THREADS();
    left_.resize(num_data);
    if (TWO_BUFFER) {
      right_.resize(num_data);
    }
    offsets_.resize(num_threads_);
    left_cnts_.resize(num_threads_);
    right_cnts_.resize(num_threads_);
    left_write_pos_.resize(num_threads_);
    right_write_pos_.resize(num_threads_);
  }
  ~ParallelPartitionRunner() {}
  // Re-sizes the scratch buffers for a new data count.
  void ReSize(INDEX_T num_data) {
    left_.resize(num_data);
    if (TWO_BUFFER) {
      right_.resize(num_data);
    }
  }
  // Partitions [0, cnt). `func(block, start, cnt, left_ptr, right_ptr)` must
  // return how many of its `cnt` items went left. Results are compacted into
  // `out`; returns the total left count (= offset of the first right item).
  template<bool FORCE_SIZE>
  INDEX_T Run(
      INDEX_T cnt,
      const std::function<INDEX_T(int, INDEX_T, INDEX_T, INDEX_T*, INDEX_T*)>& func,
      INDEX_T* out) {
    int nblock = 1;
    INDEX_T inner_size = cnt;
    if (FORCE_SIZE) {
      Threading::BlockInfoForceSize<INDEX_T>(num_threads_, cnt, min_block_size_,
                                             &nblock, &inner_size);
    } else {
      Threading::BlockInfo<INDEX_T>(num_threads_, cnt, min_block_size_, &nblock,
                                    &inner_size);
    }
    OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1) num_threads(num_threads_)
    for (int i = 0; i < nblock; ++i) {
      OMP_LOOP_EX_BEGIN();
      INDEX_T cur_start = i * inner_size;
      INDEX_T cur_cnt = std::min(inner_size, cnt - cur_start);
      offsets_[i] = cur_start;
      if (cur_cnt <= 0) {
        // Block rounding can leave trailing empty blocks.
        left_cnts_[i] = 0;
        right_cnts_[i] = 0;
        continue;
      }
      auto left_ptr = left_.data() + cur_start;
      INDEX_T* right_ptr = nullptr;
      if (TWO_BUFFER) {
        right_ptr = right_.data() + cur_start;
      }
      // split data inner, reduce the times of function called
      INDEX_T cur_left_count =
          func(i, cur_start, cur_cnt, left_ptr, right_ptr);
      if (!TWO_BUFFER) {
        // reverse for one buffer: right items were appended back-to-front
        std::reverse(left_ptr + cur_left_count, left_ptr + cur_cnt);
      }
      left_cnts_[i] = cur_left_count;
      right_cnts_[i] = cur_cnt - cur_left_count;
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    // Exclusive prefix sums give each block's write offset in `out`.
    left_write_pos_[0] = 0;
    right_write_pos_[0] = 0;
    for (int i = 1; i < nblock; ++i) {
      left_write_pos_[i] = left_write_pos_[i - 1] + left_cnts_[i - 1];
      right_write_pos_[i] = right_write_pos_[i - 1] + right_cnts_[i - 1];
    }
    // FIX: was `data_size_t left_cnt`, which hard-coded a project typedef in
    // a template parameterized on INDEX_T and could truncate when INDEX_T is
    // wider than data_size_t. Use INDEX_T, matching the return type.
    INDEX_T left_cnt = left_write_pos_[nblock - 1] + left_cnts_[nblock - 1];
    auto right_start = out + left_cnt;
#pragma omp parallel for schedule(static, 1) num_threads(num_threads_)
    for (int i = 0; i < nblock; ++i) {
      std::copy_n(left_.data() + offsets_[i], left_cnts_[i],
                  out + left_write_pos_[i]);
      if (TWO_BUFFER) {
        std::copy_n(right_.data() + offsets_[i], right_cnts_[i],
                    right_start + right_write_pos_[i]);
      } else {
        std::copy_n(left_.data() + offsets_[i] + left_cnts_[i], right_cnts_[i],
                    right_start + right_write_pos_[i]);
      }
    }
    return left_cnt;
  }

 private:
  int num_threads_;                     // thread count fixed at construction
  INDEX_T min_block_size_;              // lower bound on per-block item count
  std::vector<INDEX_T> left_;           // per-chunk left (and packed right) items
  std::vector<INDEX_T> right_;          // per-chunk right items (TWO_BUFFER only)
  std::vector<INDEX_T> offsets_;        // chunk start offsets
  std::vector<INDEX_T> left_cnts_;      // per-chunk left counts
  std::vector<INDEX_T> right_cnts_;     // per-chunk right counts
  std::vector<INDEX_T> left_write_pos_; // prefix sums of left counts
  std::vector<INDEX_T> right_write_pos_;// prefix sums of right counts
};
} // namespace LightGBM
#endif // LightGBM_UTILS_THREADING_H_
|
relic_core.c | /*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (C) 2007-2015 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* RELIC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with RELIC. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file
*
* Implementation of the library basic functions.
*
* @ingroup relic
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "relic_core.h"
#include "relic_rand.h"
#include "relic_types.h"
#include "relic_err.h"
#include "relic_arch.h"
#include "relic_fp.h"
#include "relic_fb.h"
#include "relic_ep.h"
#include "relic_eb.h"
#include "relic_cp.h"
#include "relic_pp.h"
/*============================================================================*/
/* Public definitions */
/*============================================================================*/
/**
* If multi-threading is enabled, assigns each thread a local copy of the data.
*/
#if MULTI == PTHREAD
#define thread __thread
#else
#define thread /* */
#endif
/**
* Default library context.
*/
thread ctx_t first_ctx;
/**
* Active library context.
*/
thread ctx_t *core_ctx = NULL;
#if MULTI == OPENMP
#pragma omp threadprivate(first_ctx, core_ctx)
#endif
/**
 * Initializes the library context and every compiled-in module.
 *
 * Binds the active (thread-local) context to the default one on first use,
 * resets error/bookkeeping state, then initializes the architecture layer,
 * the PRNG, and each arithmetic/curve module selected at build time.
 *
 * @return STS_OK on success, STS_ERR if any module initializer throws.
 */
int core_init(void) {
	/* First call on this thread: use the built-in default context. */
	if (core_ctx == NULL) {
		core_ctx = &(first_ctx);
	}

#if defined(RELIC_CHECK) && defined(TRACE)
	core_ctx->trace = 0;
#endif

#ifdef RELIC_CHECK
	/* Install the human-readable messages for each error code. */
	core_ctx->reason[ERR_NO_MEMORY] = MSG_NO_MEMORY;
	core_ctx->reason[ERR_NO_PRECI] = MSG_NO_PRECI;
	core_ctx->reason[ERR_NO_FILE] = MSG_NO_FILE;
	core_ctx->reason[ERR_NO_READ] = MSG_NO_READ;
	core_ctx->reason[ERR_NO_VALID] = MSG_NO_VALID;
	core_ctx->reason[ERR_NO_BUFFER] = MSG_NO_BUFFER;
	core_ctx->reason[ERR_NO_FIELD] = MSG_NO_FIELD;
	core_ctx->reason[ERR_NO_CURVE] = MSG_NO_CURVE;
	core_ctx->reason[ERR_NO_CONFIG] = MSG_NO_CONFIG;
	core_ctx->last = NULL;
#endif /* RELIC_CHECK */

#if ALLOC == STATIC
	core_ctx->next = 0;
#endif
#ifdef OVERH
	core_ctx->over = 0;
#endif

	core_ctx->code = STS_OK;

	/* TRY/CATCH_ANY are RELIC's error-handling macros; any failure in a
	 * module initializer lands in the CATCH_ANY block below. */
	TRY {
		arch_init();
		rand_init();
#ifdef WITH_FP
		fp_prime_init();
#endif
#ifdef WITH_FB
		fb_poly_init();
#endif
#ifdef WITH_FT
		ft_poly_init();
#endif
#ifdef WITH_EP
		ep_curve_init();
#endif
#ifdef WITH_EB
		eb_curve_init();
#endif
#ifdef WITH_ED
		ed_curve_init();
#endif
#ifdef WITH_PP
		pp_map_init();
#endif
	}
	CATCH_ANY {
		return STS_ERR;
	}

	return STS_OK;
}
/**
 * Releases the resources of every compiled-in module (reverse of
 * core_init()) and detaches the active context so a later core_init()
 * starts fresh.
 *
 * @return STS_OK always.
 */
int core_clean(void) {
	rand_clean();
#ifdef WITH_FP
	fp_prime_clean();
#endif
#ifdef WITH_FB
	fb_poly_clean();
#endif
#ifdef WITH_FT
	ft_poly_clean();
#endif
#ifdef WITH_EP
	ep_curve_clean();
#endif
#ifdef WITH_EB
	eb_curve_clean();
#endif
#ifdef WITH_ED
	ed_curve_clean();
#endif
#ifdef WITH_PP
	pp_map_clean();
#endif
	arch_clean();
	/* Mark this thread as uninitialized. */
	core_ctx = NULL;
	return STS_OK;
}
/**
 * Returns the active library context for the calling thread.
 *
 * Note: the parameter list is now `(void)` — in C, `()` declares a function
 * with unspecified parameters rather than a proper prototype (CERT DCL20-C).
 *
 * @return the active context, or NULL if core_init() was not called.
 */
ctx_t *core_get(void) {
	return core_ctx;
}
/**
 * Switches the active library context for the calling thread.
 *
 * @param[in] ctx	the context to make active (caller retains ownership).
 */
void core_set(ctx_t *ctx) {
	core_ctx = ctx;
}
|
GB_unaryop__minv_fp32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_int32
// op(A') function: GB_tran__minv_fp32_int32
// C type: float
// A type: int32_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv ((float) Ax [p]) for all p: casts each int32 entry to float
// and applies the multiplicative-inverse operator (1.0F)/aij, in parallel.
GrB_Info GB_unop__minv_fp32_int32
(
    float *Cx,          // output array; Cx and Ax may be aliased
    int32_t *Ax,        // input array of anz entries
    int64_t anz,        // number of entries
    int nthreads        // OpenMP thread count to use
)
{
    #if GB_DISABLE
    // this kernel was compiled out via GxB_NO_* flags; caller falls back
    // to the generic worker
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (1.0F) / ((float) Ax [p]), via the GB_* macros above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((float) A'): typecast, transpose, and apply the unary operator.
// The actual loop lives in GB_unaryop_transpose.c, which is specialized by
// the GB_* macros defined above when #included here.
GrB_Info GB_tran__minv_fp32_int32
(
    GrB_Matrix C,                           // output matrix
    const GrB_Matrix A,                     // input matrix
    int64_t *GB_RESTRICT *Rowcounts,        // per-slice row counts workspace
    GBI_single_iterator Iter,               // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice,     // how A is sliced across tasks
    int naslice                             // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
automine_omp.h |
// ad-hoc 4-clique with on-the-fly symmetry breaking
// ad-hoc 4-clique with on-the-fly symmetry breaking
// Counts 4-cliques on an undirected graph, breaking symmetry by bounding
// each successive vertex below the previous one (v2 < v1 < v0), so each
// clique is counted exactly once. Result is written to `total`.
void automine_4clique_sb(Graph &g, uint64_t &total) {
  uint64_t counter = 0;
  #pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
  for (vidType v0 = 0; v0 < g.V(); v0++) {
    //auto tid = omp_get_thread_num();
    uint64_t local_counter = 0;
    auto y0 = g.N(v0);
#if 0
    // Dead alternative kept for reference: early-break formulation.
    // NOTE(review): this branch adds to `counter` directly instead of
    // local_counter — harmless while compiled out.
    for (auto v1 : y0) {
      if (v1 >= v0) break;
      auto y0y1 = y0 & g.N(v1);
      for (auto v2 : y0y1) {
        if (v2 >= v1) break;
        counter += intersection_num(y0y1, g.N(v2), v2);
      }
    }
#else
    // Live formulation: bounded() restricts a neighbor set to vertices
    // below the given bound; the final intersection_num is also bounded
    // by v2, completing the v3 < v2 ordering.
    auto y0f0 = bounded(y0,v0);
    for (auto v1 : y0f0) {
      auto y1 = g.N(v1);
      auto y0y1 = intersection_set(y0, y1);
      auto y0y1f1 = bounded(y0y1,v1);
      for (auto v2 : y0y1f1) {
        VertexSet y2 = g.N(v2);
        local_counter += intersection_num(y0y1, y2, v2);
      }
    }
    counter += local_counter;
#endif
  }
  total = counter;
}
// ad-hoc 4-clique (use DAG)
// ad-hoc 4-clique (use DAG)
// Counts 4-cliques assuming `g` is already oriented as a DAG, so no
// explicit symmetry breaking is needed: every directed 3-path that closes
// is a unique clique. Result is written to `total`.
void automine_4clique(Graph &g, uint64_t &total) {
  uint64_t counter = 0;
  #pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
  for (vidType v0 = 0; v0 < g.V(); v0++) {
    uint64_t local_counter = 0;
    //auto tid = omp_get_thread_num();
    auto y0 = g.N(v0);
    for (auto v1 : y0) {
      auto y0y1 = y0 & g.N(v1);                    // common out-neighbors of v0, v1
      for (auto v2 : y0y1) {
        // count v3 adjacent to all of v0, v1, v2
        local_counter += intersection_num(y0y1, g.N(v2));
      }
    }
    counter += local_counter;
  }
  total = counter;
}
// ad-hoc 5-clique with on-the-fly symmetry breaking
// ad-hoc 5-clique with on-the-fly symmetry breaking
// Counts 5-cliques on an undirected graph by enforcing a descending vertex
// order (v3 < v2 < v1 < v0) via bounded() neighbor sets. Result in `total`.
void automine_5clique_sb(Graph &g, uint64_t &total) {
  uint64_t counter = 0;
  //auto tid = omp_get_thread_num();
#if 0
  // Dead alternative kept for reference (early-break formulation).
  // NOTE(review): uses `>` where the live path's bounded() implies strict
  // `<` ordering — verify before re-enabling.
  #pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
  for (vidType v1 = 0; v1 < g.V(); v1++) {
    uint64_t local_counter = 0;
    auto y1 = g.N(v1);
    for (auto v2 : y1) {
      if (v2 > v1) break;
      auto y1y2 = intersection_set(y1, g.N(v2));
      for (auto v3 : y1y2) {
        if (v3 > v2) break;
        auto y1y2y3 = intersection_set(y1y2, g.N(v3));
        for (auto v4 : y1y2y3) {
          if (v4 > v3) break;
          local_counter += intersection_num(y1y2y3, g.N(v4), v4);
        }
      }
    }
    counter += local_counter;
  }
#else
  // Live formulation: each level intersects with the next neighbor set and
  // restricts candidates below the current vertex with bounded().
  #pragma omp parallel for schedule(dynamic,1) reduction(+:counter)
  for(vidType v0 = 0; v0 < g.V(); v0++) {
    uint64_t local_counter = 0;
    VertexSet y0 = g.N(v0);
    VertexSet y0f0 = bounded(y0,v0);
    for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) {
      vidType v1 = y0f0.begin()[idx1];
      VertexSet y1 = g.N(v1);
      VertexSet y0y1 = intersection_set(y0, y1);
      VertexSet y0y1f1 = bounded(y0y1,v1);
      for(vidType idx2 = 0; idx2 < y0y1f1.size(); idx2++) {
        vidType v2 = y0y1f1.begin()[idx2];
        VertexSet y2 = g.N(v2);
        VertexSet y0y1y2 = intersection_set(y0y1, y2);
        VertexSet y0y1y2f2 = bounded(y0y1y2,v2);
        for(vidType idx3 = 0; idx3 < y0y1y2f2.size(); idx3++) {
          vidType v3 = y0y1y2f2.begin()[idx3];
          VertexSet y3 = g.N(v3);
          // v4 must be adjacent to v0..v3 and below v3
          local_counter += intersection_num(y0y1y2, y3, v3);
        }
      }
    }
    counter += local_counter;
  }
#endif
  total = counter;
}
// ad-hoc 5-clique (use DAG)
// ad-hoc 5-clique (use DAG)
// Counts 5-cliques assuming `g` is already oriented as a DAG (no symmetry
// breaking needed). Result is written to `total`.
void automine_5clique(Graph &g, uint64_t &total) {
  uint64_t counter = 0;
  #pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
  for (vidType v1 = 0; v1 < g.V(); v1++) {
    //auto tid = omp_get_thread_num();
    uint64_t local_counter = 0;
    auto y1 = g.N(v1);
    for (auto v2 : y1) {
      auto y1y2 = intersection_set(y1, g.N(v2));     // common neighbors of v1, v2
      for (auto v3 : y1y2) {
        auto y1y2y3 = intersection_set(y1y2, g.N(v3));
        for (auto v4 : y1y2y3) {
          // count v5 adjacent to all of v1..v4
          local_counter += intersection_num(y1y2y3, g.N(v4));
        }
      }
    }
    counter += local_counter;
  }
  total = counter;
}
// Dispatches to the AutoMine k-clique kernel for the requested clique size.
// Uses the DAG variants when the graph was built as a DAG (USE_DAG == 1),
// otherwise the symmetry-breaking variants. Only k = 4 and k = 5 are
// implemented; other sizes terminate the program.
void automine_kclique(Graph &g, unsigned k, uint64_t &total) {
  std::cout << "Running AutoMine k-clique solver\n";
  if (k == 4) {
#if USE_DAG == 1
    automine_4clique(g, total);
#else
    automine_4clique_sb(g, total);
#endif
  } else if (k == 5) {
#if USE_DAG == 1
    automine_5clique(g, total);
#else
    // BUG FIX: this branch previously called automine_4clique_sb, so a
    // non-DAG 5-clique request silently produced the 4-clique count.
    automine_5clique_sb(g, total);
#endif
  } else {
    std::cout << "Not implemented yet\n";
    exit(0);
  }
}
|
relax.c | /*BHEADER**********************************************************************
* Copyright (c) 2006 The Regents of the University of California.
* Produced at the Lawrence Livermore National Laboratory.
* Written by the HYPRE team. UCRL-CODE-222953.
* All rights reserved.
*
* This file is part of HYPRE (see http://www.llnl.gov/CASC/hypre/).
* Please see the COPYRIGHT_and_LICENSE file for the copyright notice,
* disclaimer, contact information and the GNU Lesser General Public License.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License (as published by the Free Software
* Foundation) version 2.1 dated February 1999.
*
* HYPRE is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Revision: 2.8 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Relaxation scheme
*
*****************************************************************************/
#include "headers.h"
//#include "omp.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGSeqRelax
*--------------------------------------------------------------------------*/
/*
 * Performs one sweep of hybrid relaxation on A u = f: Gauss-Seidel within
 * each thread's contiguous row block, Jacobi (using a snapshot of u) for
 * couplings that cross block boundaries.
 *
 * A  - CSR matrix; the diagonal entry of each row is stored first, i.e.
 *      A_diag_data[A_diag_i[i]] is a(i,i).
 * f  - right-hand side vector.
 * u  - approximate solution, updated in place.
 *
 * Returns relax_error (always 0 here; no failure paths set it).
 */
int hypre_BoomerAMGSeqRelax( hypre_CSRMatrix *A,
                             hypre_Vector    *f,
                             hypre_Vector    *u)
{
   double *A_diag_data = hypre_CSRMatrixData(A);
   int    *A_diag_i    = hypre_CSRMatrixI(A);
   int    *A_diag_j    = hypre_CSRMatrixJ(A);
   int     n           = hypre_CSRMatrixNumRows(A);
   double *u_data      = hypre_VectorData(u);
   double *f_data      = hypre_VectorData(f);
   double *tmp_data;   /* snapshot of u used for cross-block (Jacobi) terms */
   double  res;
   int     i, j;
   int     ii, jj;
   int     ns, ne, size, rest;
   int     relax_error = 0;
   // int index, start;
   int     num_threads;

   /* NOTE(review): this value is never used — num_threads is declared
    * private in the parallel region below and reset to 1 there. */
   num_threads = hypre_NumThreads();

   /*-----------------------------------------------------------------------
    * Switch statement to direct control based on relax_type:
    * relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor
    * with outer relaxation parameters (forward solve)
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/
   if (1)
   {
      tmp_data = hypre_CTAlloc(double,n);
#pragma omp parallel private(num_threads)
      {
         /* NOTE(review): forcing num_threads = 1 makes the block loop below
          * a single block [0, n) distributed by `omp for` — presumably a
          * deliberate simplification of the original omp_get_num_threads()
          * blocking; confirm before changing. */
         num_threads = 1; /* omp_get_num_threads(); */

         /* Snapshot u so cross-block couplings use old values (Jacobi). */
#pragma omp for private(i)
         for (i = 0; i < n; i++)
            tmp_data[i] = u_data[i];

         /* Each j handles one contiguous row block [ns, ne); the first
          * `rest` blocks get one extra row to absorb the remainder. */
#pragma omp for private(i,ii,j,jj,ns,ne,res,rest,size)
         for (j = 0; j < num_threads; j++)
         {
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++) /* interior points first */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if ( A_diag_data[A_diag_i[i]] != 0.0)
               {
                  res = f_data[i];
                  /* Off-diagonal entries start at A_diag_i[i]+1. */
                  for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                        res -= A_diag_data[jj] * u_data[ii];   /* in-block: Gauss-Seidel */
                     else
                        res -= A_diag_data[jj] * tmp_data[ii]; /* cross-block: Jacobi */
                  }
                  u_data[i] = res / A_diag_data[A_diag_i[i]];
               }
            }
         }
      }
      hypre_TFree(tmp_data);
   }
   else
   {
      /* NOTE(review): dead code — the condition above is literally `if (1)`.
       * This is the plain sequential Gauss-Seidel sweep. */
      for (i = 0; i < n; i++) /* interior points first */
      {
         /*-----------------------------------------------------------
          * If diagonal is nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/
         if ( A_diag_data[A_diag_i[i]] != 0.0)
         {
            res = f_data[i];
            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * u_data[ii];
            }
            u_data[i] = res / A_diag_data[A_diag_i[i]];
         }
      }
   }
   return(relax_error);
}
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class is used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned an unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
/*
  One color channel's interval within the histogram. `center` is used as a
  running sum of channel values while pixels are counted and is later divided
  by the cluster's pixel count to become the mean (see Classify()).
*/
typedef struct _ExtentPacket
{
  double
    center;           /* sum, later mean, of channel values in the class */

  ssize_t
    index,            /* scan position — presumably advanced by DefineRegion(); confirm */
    left,             /* inclusive histogram bounds of the interval */
    right;
} ExtentPacket;

/*
  One candidate color class: the per-channel extents that define it, the
  number of pixels assigned so far, and its id. Classes form a singly
  linked list built and pruned in Classify().
*/
typedef struct _Cluster
{
  struct _Cluster
    *next;            /* next class in the list, NULL-terminated */

  ExtentPacket
    red,
    green,
    blue;             /* per-channel extents of this class */

  ssize_t
    count,            /* pixels assigned to this class */
    id;
} Cluster;

/*
  Node of the interval tree built over zero crossings of the smoothed
  histogram's second derivative across scales (tau values).
*/
typedef struct _IntervalTree
{
  double
    tau;              /* scale at which this interval was found */

  ssize_t
    left,             /* histogram interval covered by this node */
    right;

  double
    mean_stability,   /* NOTE(review): stability metrics computed elsewhere; */
    stability;        /* semantics not visible in this chunk */

  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;

/*
  Histogram smoothed at one scale tau together with the signs/locations of
  the zero crossings of its second derivative (256 histogram bins).
*/
typedef struct _ZeroCrossing
{
  double
    tau,
    histogram[256];

  short
    crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static double
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
FreeNodes(IntervalTree *),
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const double,double *),
ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,
% const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const double cluster_threshold,
const double weighting_exponent,const MagickBooleanType verbose,
ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"
#define ThrowClassifyException(severity,tag,label) \
{\
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \
{ \
next_cluster=cluster->next; \
cluster=(Cluster *) RelinquishMagickMemory(cluster); \
} \
if (squares != (double *) NULL) \
{ \
squares-=255; \
free_squares=squares; \
free_squares=(double *) RelinquishMagickMemory(free_squares); \
} \
ThrowBinaryException(severity,tag,label); \
}
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
double
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register double
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
squares=(double *) NULL;
(void) memset(&red,0,sizeof(red));
(void) memset(&green,0,sizeof(green));
(void) memset(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireQuantumMemory(1,
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
status=MagickTrue;
count=0;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(double) ScaleQuantumToChar(
GetPixelRed(image,p));
cluster->green.center+=(double) ScaleQuantumToChar(
GetPixelGreen(image,p));
cluster->blue.center+=(double) ScaleQuantumToChar(
GetPixelBlue(image,p));
cluster->count++;
break;
}
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowClassifyException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations.
*/
squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (double *) NULL)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(double) i*(double) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do coarse grain classes.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*clust;
register const PixelInfo
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,(Quantum) 0,q);
for (clust=head; clust != (Cluster *) NULL; clust=clust->next)
{
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
(clust->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
(clust->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
(clust->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
(clust->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
(clust->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
(clust->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(image,(Quantum) clust->id,q);
break;
}
}
if (clust == (Cluster *) NULL)
{
double
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(image,(Quantum) j,q);
}
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image,exception);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(double *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings.

    Walk the scale-space fingerprint from coarse (high index) to fine,
    relocating each crossing at level i so that an even number of crossings
    of the next-coarser level (i+1) lies between it and its left neighbor.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      /* Nearest level-(i+1) crossing strictly left of j (clamped to 0). */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      /* Nearest level-(i+1) crossing strictly right of j (clamped to 255). */
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;  /* for j == 0 the loop leaves k at -1 */
      /*
        Check center for an even number of crossings between k and j.
        correct stays -1 until an admissible relocation target is found;
        candidates are tried in preference order: center, left, right.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing's sign/value from j to the chosen position; if no
        admissible position exists the crossing is dropped.
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o extents: This pointer to an ExtentPacket represent the extends
% of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Locate the next peak region in the extrema table, starting the scan at
    extents->index.  Returns MagickTrue with extents->left/right set to the
    region bounds, or MagickFalse when no further region exists.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Advance to the first positive entry: the left side (maxima) of a region.
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Advance to the first negative entry: the right side (minima) ends the
    region one position earlier.
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  register ssize_t
    u;

  /*
    Estimate the derivative of a 256-entry histogram.  The endpoints use
    one-sided second-order polynomial interpolation; interior samples use
    central differencing.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[255]=(0.5*histogram[253]-2.0*histogram[254]+1.5*histogram[255]);
  for (u=1; u < 255; u++)
    derivative[u]=(histogram[u+1]-histogram[u-1])/2.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /*
      Was sizeof(**histogram): allocate the extrema table with its own
      element size, consistent with SegmentImage().
    */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Start the unwind at index i (not i-1) so the half of the current
          pair that DID succeed is released too; RelinquishMagickMemory()
          accepts NULL.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one per (red,green,blue) peak-region triple.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireQuantumMemory(1,
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              Release the cluster list built so far plus the histogram and
              extrema buffers; the original leaked them on this error path.
              (The list is NULL-terminated even here: the failed allocation
              was stored in the previous node's next pointer.)
            */
            for (cluster=head; cluster != (Cluster *) NULL;
                 cluster=next_cluster)
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            Release the histogram and extrema buffers before raising the
            error (previously leaked on this path).
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    The object is the smallest surviving cluster, the background the
    largest; with a single cluster both are the head.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The threshold for each channel is the midpoint between the
        background and object cluster centers.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const Quantum
    *pixels;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Zero all 256 bins of the red, green, and blue channel histograms.
  */
  for (i=0; i <= 255; i++)
    histogram[Red][i]=histogram[Green][i]=histogram[Blue][i]=0;
  /*
    Accumulate one count per pixel into the bin addressed by each channel's
    8-bit intensity.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    pixels=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (pixels == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,
        pixels))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,
        pixels))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,
        pixels))]++;
      pixels+=GetPixelChannels(image);
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
%      IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Append every childless (leaf) node of the tree rooted at node to list,
    advancing *number_nodes for each entry written.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
    }
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}
static void MeanStability(IntervalTree *node)
{
  /*
    Set each node's mean_stability to the average stability of its
    immediate children (0.0 for a leaf), over the whole tree.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      register IntervalTree
        *child;

      double
        sum;

      ssize_t
        children;

      sum=0.0;
      children=0;
      for (child=node->child; child != (IntervalTree *) NULL;
           child=child->sibling)
      {
        sum+=child->stability;
        children++;
      }
      node->mean_stability=sum/(double) children;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
  /*
    A node's stability is the tau gap to its first child; leaves get 0.0.
    Applied recursively over siblings and children.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.  Returns NULL on allocation failure; on success
    the caller owns the returned tree and releases it with FreeNodes().
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /*
    i runs from -1 so the first pass (i+1 == 0) splits the root against the
    coarsest zero-crossing level.
  */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: each leaf interval is subdivided at every crossing of the
      next-finer level that falls strictly inside it; the pieces become the
      leaf's children (first via ->child, then chained via ->sibling).
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                /* Allocation failed: free the partial tree and bail. */
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /*
        If any split occurred (left moved), a final child covers the
        remainder [left, head->right].
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Collect "active" nodes: a node whose stability reaches its children's
    mean stability is recorded and its subtree is pruned; otherwise the
    search descends into its children.  Sibling chains are always walked.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[(*number_nodes)++]=node;
  ActiveNodes(list,number_nodes,node->sibling);
}
static void FreeNodes(IntervalTree *node)
{
  /*
    Release an interval (sub)tree: iterate the sibling chain, recursively
    freeing each node's children before the node itself.
  */
  while (node != (IntervalTree *) NULL)
  {
    IntervalTree
      *sibling;

    sibling=node->sibling;
    FreeNodes(node->child);
    node=(IntervalTree *) RelinquishMagickMemory(node);
    node=sibling;
  }
}
static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.  On any allocation failure this routine returns
    0.0 after releasing whatever it acquired.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau step plus the original
    histogram entry.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);  /* -1.0 marks an unused entry */
  /*
    Initialize zero crossing list: for each tau from coarse to fine, smooth
    the histogram at that scale and record where its second derivative
    crosses zero.
  */
  derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(double *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *) RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval: mirror the first and
    last interior crossing signs onto bins 0 and 255.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: stability is greater (or equal) to the mean stability of
    its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak.  A -1 crossing at the interval's right
      edge marks a peak (maximum); otherwise the interval holds a valley.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /*
      Mark the whole interval with the extremum's bin (positive for a peak,
      negative for a valley); bin 0 is encoded as 256 so it stays nonzero.
    */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau*=PerceptibleReciprocal((double) number_nodes);
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    alpha,
    beta,
    *kernel,
    sum;

  register ssize_t
    u,
    x;

  /*
    Smooth the 256-bin histogram with a Gaussian of standard deviation tau,
    writing the filtered response into scale_histogram.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  /*
    Tabulate the (unnormalized) Gaussian by distance; once the response
    falls below MagickEpsilon the remaining entries stay zero.
  */
  for (x=0; x <= 255; x++)
    kernel[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    kernel[x]=exp((double) beta*x*x);
    if (kernel[x] < MagickEpsilon)
      break;
  }
  /*
    Convolve: each output bin is the kernel-weighted sum over all bins,
    scaled by the Gaussian normalization factor alpha.
  */
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(double) histogram[u]*kernel[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=alpha*sum;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SegmentImage() segment an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate a 256-bin histogram and an extrema table for each component.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Start the unwind at index i (not i-1) so the half of the current
          pair that DID succeed is released too; RelinquishMagickMemory()
          accepts NULL.  (The original leaked one buffer here.)
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Build the channel histograms in the requested colorspace, then derive
    each channel's extrema (peaks/valleys) at the optimal scale-space tau.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique, then restore the image's
    original colorspace.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% histogram of a particular color component.
%
*/
/*
  Mark zero crossings of the 256-bin second-derivative histogram.
  crossings[i] is set to -1 where the sign turns positive-to-negative,
  1 where it turns negative-to-positive, and 0 elsewhere.  Values whose
  magnitude falls inside the smoothing band are first zeroed in place
  (note the asymmetric band test, kept exactly as specified:
  value < threshold && value >= -threshold).
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  register ssize_t
    j;

  ssize_t
    slope;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (j=0; j <= 255; j++)
  {
    double value = second_derivative[j];
    if ((value < smooth_threshold) && (value >= -smooth_threshold))
      second_derivative[j]=0.0;
  }
  /*
    Mark zero crossings: `slope` remembers the sign of the most recent
    non-zero bin (0 until one is seen).
  */
  slope=0;
  for (j=0; j <= 255; j++)
  {
    crossings[j]=0;
    if (second_derivative[j] > 0.0)
      {
        if (slope < 0)
          crossings[j]=1;
        slope=(-1);
      }
    else
      if (second_derivative[j] < 0.0)
        {
          if (slope > 0)
            crossings[j]=(-1);
          slope=1;
        }
  }
}
|
bli_cntx_init_a64fx.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
#include "bli_a64fx_sector_cache.h"
// Configure a BLIS context for the Fujitsu A64FX (ARM SVE-512): install
// the optimized gemm micro-kernels and packing routines, register the
// architecture's level-3 blocksizes, and program the per-CMG sector-cache
// sizes.  Mutates *cntx in place on top of the reference initialization.
void bli_cntx_init_a64fx( cntx_t* cntx )
{
    blksz_t blkszs[ BLIS_NUM_BLKSZS ];
    blksz_t thresh[ BLIS_NUM_THRESH ];  // only used by the disabled sup section below

    // Set default kernel blocksizes and functions.
    bli_cntx_init_a64fx_ref( cntx );

    // -------------------------------------------------------------------------

    // Update the context with optimized native gemm micro-kernels and
    // their storage preferences.
    bli_cntx_set_l3_nat_ukrs
    (
      2,
      BLIS_GEMM_UKR, BLIS_FLOAT,  bli_sgemm_armsve_asm_2vx10_unindexed, FALSE,
      BLIS_GEMM_UKR, BLIS_DOUBLE, bli_dgemm_armsve_asm_2vx10_unindexed, FALSE,
      cntx
    );

    // Set SVE-512 packing routine.
    bli_cntx_set_packm_kers
    (
      3,
      BLIS_PACKM_10XK_KER, BLIS_DOUBLE, bli_dpackm_armsve512_asm_10xk,
      BLIS_PACKM_12XK_KER, BLIS_DOUBLE, bli_dpackm_armsve512_asm_12xk,
      BLIS_PACKM_16XK_KER, BLIS_DOUBLE, bli_dpackm_armsve512_asm_16xk,
      cntx
    );

    // Initialize level-3 blocksize objects with architecture-specific values.
    //                                            s      d      c      z
    bli_blksz_init_easy( &blkszs[ BLIS_MR ],     32,    16,    -1,    -1 );
    bli_blksz_init_easy( &blkszs[ BLIS_NR ],     10,    10,    -1,    -1 );
    bli_blksz_init_easy( &blkszs[ BLIS_MC ],    256,   128,    -1,    -1 );
    bli_blksz_init_easy( &blkszs[ BLIS_KC ],   2048,  2048,    -1,    -1 );
    bli_blksz_init_easy( &blkszs[ BLIS_NC ],  23040, 26880,    -1,    -1 );

    // Update the context with the current architecture's register and cache
    // blocksizes (and multiples) for native execution.
    bli_cntx_set_blkszs
    (
      BLIS_NAT, 5,
      BLIS_NC, &blkszs[ BLIS_NC ], BLIS_NR,
      BLIS_KC, &blkszs[ BLIS_KC ], BLIS_KR,
      BLIS_MC, &blkszs[ BLIS_MC ], BLIS_MR,
      BLIS_NR, &blkszs[ BLIS_NR ], BLIS_NR,
      BLIS_MR, &blkszs[ BLIS_MR ], BLIS_MR,
      cntx
    );

#if 0
    // Initialize sup thresholds with architecture-appropriate values.
    //                                           s     d     c     z
    bli_blksz_init_easy( &thresh[ BLIS_MT ],    -1,   65,   -1,   -1 );
    bli_blksz_init_easy( &thresh[ BLIS_NT ],    -1,   65,   -1,   -1 );
    bli_blksz_init_easy( &thresh[ BLIS_KT ],    -1,   65,   -1,   -1 );

    // Initialize the context with the sup thresholds.
    bli_cntx_set_l3_sup_thresh
    (
      3,
      BLIS_MT, &thresh[ BLIS_MT ],
      BLIS_NT, &thresh[ BLIS_NT ],
      BLIS_KT, &thresh[ BLIS_KT ],
      cntx
    );

    // Update the context with optimized small/unpacked gemm kernels.
    bli_cntx_set_l3_sup_kers
    (
      4,
      BLIS_RRR, BLIS_DOUBLE, bli_dgemmsup_rv_armsve_10x2v_unindexed, TRUE,
      BLIS_RCR, BLIS_DOUBLE, bli_dgemmsup_rv_armsve_10x2v_unindexed, TRUE,
      BLIS_CCR, BLIS_DOUBLE, bli_dgemmsup_rv_armsve_10x2v_unindexed, TRUE,
      BLIS_CCC, BLIS_DOUBLE, bli_dgemmsup_rv_armsve_10x2v_unindexed, TRUE,
      cntx
    );

    // Initialize level-3 sup blocksize objects with architecture-specific
    // values.
    //                                           s     d     c     z
    bli_blksz_init_easy( &blkszs[ BLIS_MR ],    -1,   10,   -1,   -1 );
    bli_blksz_init_easy( &blkszs[ BLIS_NR ],    -1,   16,   -1,   -1 );
    bli_blksz_init_easy( &blkszs[ BLIS_MC ],    -1,  120,   -1,   -1 );
    bli_blksz_init_easy( &blkszs[ BLIS_KC ],    -1,  256,   -1,   -1 );
    bli_blksz_init_easy( &blkszs[ BLIS_NC ],    -1, 4080,   -1,   -1 );

    // Update the context with the current architecture's register and cache
    // blocksizes for small/unpacked level-3 problems.
    bli_cntx_set_l3_sup_blkszs
    (
      5,
      BLIS_NC, &blkszs[ BLIS_NC ],
      BLIS_KC, &blkszs[ BLIS_KC ],
      BLIS_MC, &blkszs[ BLIS_MC ],
      BLIS_NR, &blkszs[ BLIS_NR ],
      BLIS_MR, &blkszs[ BLIS_MR ],
      cntx
    );
#endif

    // Set A64FX cache sector sizes for each PE/CMG
    // SC Fugaku might disable users' setting cache sizes.
#if !defined(CACHE_SECTOR_SIZE_READONLY)
    // Run under `omp parallel` so every thread (PE) programs its own
    // sector-cache configuration registers.
#pragma omp parallel
    {
        A64FX_SETUP_SECTOR_CACHE_SIZES(A64FX_SCC(0,1,3,0))
        A64FX_SETUP_SECTOR_CACHE_SIZES_L2(A64FX_SCC_L2(9,28))
    }
#endif
}
|
ClassNLLCriterion.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/ClassNLLCriterion.c"
#else
// Forward pass of the class negative log-likelihood criterion.
//
// `input` holds log-probabilities, either 1D (n_classes) for a single
// sample or 2D (batch_size x n_classes) for a minibatch; `target` holds
// one class index per sample (offset by TH_INDEX_BASE).  `weights`, if
// non-NULL, rescales each class's contribution and must have n_classes
// elements.  Samples whose target equals `ignore_index` contribute zero.
// With reduce==false and 2D input, `output` gets one loss per sample;
// otherwise `output` is resized to a single element holding the summed
// (and, if sizeAverage, weight-normalized) loss, and `total_weight` gets
// the sum of participating class weights.
void THNN_(ClassNLLCriterion_updateOutput)(
          THNNState *state,
          THTensor *input,
          THIndexTensor *target,
          THTensor *output,
          bool sizeAverage,
          THTensor *weights,
          THTensor *total_weight,
          int64_t ignore_index,
          bool reduce)
{
  THTensor_(resize1d)(total_weight, 1);
  int n_dims = THTensor_(nDimension)(input);
  int n_classes = THTensor_(size)(input, n_dims - 1);
  // Shift ignore_index into the same 0-based space as the adjusted targets.
  ignore_index -= TH_INDEX_BASE;

  if (THIndexTensor_(nDimension)(target) > 1) {
    THError("multi-target not supported");
  }
  if (THTensor_(nDimension)(input) > 2) {
    THError("input tensor should be 1D or 2D");
  }
  if (weights && THTensor_(nElement)(weights) != n_classes) {
    THDescBuff s1 = THTensor_(sizeDesc)(weights);
    THError("weight tensor should be defined either for all %d classes or no classes"
            " but got weight tensor of shape: %s", n_classes, s1.str);
  }

  if (!reduce && n_dims == 2) {
    // Unreduced minibatch: one loss value per sample.
    int batch_size = THTensor_(size)(input, 0);
    THTensor_(resize1d)(output, batch_size);

    int invalid_target = -1;  // We cannot throw an exception inside omp parallel
    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < batch_size; i++) {
      int cur_target = THTensor_fastGet1d(target, i) - TH_INDEX_BASE;
      if (cur_target >= 0 && cur_target < n_classes) {
        if (cur_target == ignore_index) {
          THTensor_fastSet1d(output, i, 0.0f);
          continue;
        }
        real cur_weight = weights ? THTensor_fastGet1d(weights, cur_target) : 1.0f;
        THTensor_fastSet1d(output, i, -THTensor_fastGet2d(input, i, cur_target) * cur_weight);
      } else {
        // Record one out-of-range target atomically; reported after the loop.
        THAtomicCompareAndSwap(&invalid_target, -1, cur_target);
      }
    }
    if (invalid_target >= 0) {
      THError("Target %d out of bounds", invalid_target);
    }
    return;
  }

  if (!reduce && n_dims <= 1) {
    // 1D unreduced degenerates to the reduced path without averaging.
    sizeAverage = false;
  }

  THTensor_(resize1d)(output, 1);
  // Work on contiguous views; freed before returning.
  input = THTensor_(newContiguous)(input);
  target = THIndexTensor_(newContiguous)(target);
  weights = weights ? THTensor_(newContiguous)(weights) : NULL;

  real *input_data = THTensor_(data)(input);
  THIndex_t *target_data = THIndexTensor_(data)(target);
  real *weights_data = weights ? THTensor_(data)(weights) : NULL;
  real *output_data = THTensor_(data)(output);
  real *total_weight_data = THTensor_(data)(total_weight);

  output_data[0] = total_weight_data[0] = 0.0;

  if (THTensor_(nDimension)(input) == 1) {
    // Single sample: loss is the (weighted) negative log-probability of
    // the target class.
    int cur_target = target_data[0] - TH_INDEX_BASE;
    if (cur_target != ignore_index) {
      THAssert(cur_target >= 0 && cur_target < n_classes);
      total_weight_data[0] = weights ? weights_data[cur_target] : 1.0f;
      output_data[0] = -input_data[cur_target] * total_weight_data[0];
    }
  } else if (THTensor_(nDimension)(input) == 2) {
    // Minibatch: accumulate weighted losses and the total weight.
    int batch_size = THTensor_(size)(input, 0);
    THAssert(THIndexTensor_(size)(target, 0) == batch_size);

    int n_target = THTensor_(size)(input, 1);

    int i;
    for (i = 0; i < batch_size; i++) {
      int cur_target = target_data[i] - TH_INDEX_BASE;
      if (cur_target != ignore_index) {
        THAssert(cur_target >= 0 && cur_target < n_classes);
        real cur_weight = weights ? weights_data[cur_target] : 1.0f;
        total_weight_data[0] += cur_weight;
        output_data[0] -= input_data[i * n_target + cur_target] * cur_weight;
      }
    }
  }

  if (sizeAverage && total_weight_data[0]) {
    output_data[0] /= total_weight_data[0];
  }

  // Release the contiguous copies acquired above.
  if (weights) {
    THTensor_(free)(weights);
  }
  THTensor_(free)(input);
  THIndexTensor_(free)(target);
}
// Backward pass of the class negative log-likelihood criterion.
//
// Resizes gradInput to input's shape, zero-fills it, and writes the
// gradient only at each sample's target class: -(class weight) times the
// incoming gradOutput, divided by *total_weight when size-averaging.
// Samples whose target equals `ignore_index` keep a zero gradient.
// `total_weight` must hold the value produced by updateOutput.
void THNN_(ClassNLLCriterion_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THIndexTensor *target,
          THTensor *gradOutput,
          THTensor *gradInput,
          bool sizeAverage,
          THTensor *weights,
          THTensor *total_weight,
          int64_t ignore_index,
          bool reduce)
{
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);
  int n_dims = THTensor_(nDimension)(input);
  int n_classes = THTensor_(size)(input, n_dims - 1);
  // Shift ignore_index into the same 0-based space as the adjusted targets.
  ignore_index -= TH_INDEX_BASE;

  if (!THTensor_(isContiguous)(gradInput)) {
    THError("gradInput must be contiguous");
  }
  if (THIndexTensor_(nDimension)(target) > 1) {
    THError("multi-target not supported");
  }
  if (THTensor_(nDimension)(input) > 2) {
    THError("input tensor should be 1D or 2D");
  }
  if (weights && THTensor_(nElement)(weights) != n_classes) {
    THError("weight tensor should be defined either for all or no classes");
  }

  if (!reduce && n_dims == 2) {
    // Unreduced minibatch: gradOutput carries one value per sample.
    int batch_size = THTensor_(size)(input, 0);
    THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, batch_size);
    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < batch_size; i++) {
      int cur_target = THTensor_fastGet1d(target, i) - TH_INDEX_BASE;
      if (cur_target == ignore_index) {
        continue;
      }
      real weight = weights ? THTensor_fastGet1d(weights, cur_target) : 1.0f;
      THTensor_fastSet2d(gradInput, i, cur_target, -weight * THTensor_fastGet1d(gradOutput, i));
    }
    return;
  }

  if (!reduce && n_dims <= 1) {
    sizeAverage = false;
  }

  real *total_weight_data = THTensor_(data)(total_weight);
  if (*total_weight_data <= 0) {
    // Forward pass saw no contributing samples; gradient stays zero.
    return;
  }

  THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1);
  // Work on contiguous views; freed before returning.
  target = THIndexTensor_(newContiguous)(target);
  weights = weights ? THTensor_(newContiguous)(weights) : NULL;

  THIndex_t *target_data = THIndexTensor_(data)(target);
  real *weights_data = weights ? THTensor_(data)(weights) : NULL;
  real *gradInput_data = THTensor_(data)(gradInput);
  real gradOutput_value = THTensor_(get1d)(gradOutput, 0);

  if (THTensor_(nDimension)(input) == 1) {
    int cur_target = target_data[0] - TH_INDEX_BASE;
    if (cur_target != ignore_index) {
      THAssert(cur_target >= 0 && cur_target < n_classes);
      gradInput_data[cur_target] =
        (!sizeAverage && weights) ? -weights_data[cur_target] : -1;
      gradInput_data[cur_target] *= gradOutput_value;
    }
  } else if (THTensor_(nDimension)(input) == 2) {
    int batch_size = THTensor_(size)(input, 0);
    THAssert(THIndexTensor_(size)(target, 0) == batch_size);

    int n_target = THTensor_(size)(input, 1);

    int i;
    for (i = 0; i < batch_size; i++){
      int cur_target = target_data[i] - TH_INDEX_BASE;
      if (cur_target != ignore_index) {
        THAssert(cur_target >= 0 && cur_target < n_classes);
        gradInput_data[i * n_target + cur_target] =
          -(weights ? weights_data[cur_target] : 1.0f) * gradOutput_value;
        if (sizeAverage && *total_weight_data) {
          gradInput_data[i * n_target + cur_target] /= *total_weight_data;
        }
      }
    }
  }

  THIndexTensor_(free)(target);
  if (weights) {
    THTensor_(free)(weights);
  }
}
#endif
|
6319.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose
// 2-D FDTD (finite-difference time-domain) kernel, PolyBench fdtd-2d.
// Per time step: drive row 0 of ey from the source waveform _fict_, then
// apply the ey, ex, and hz finite-difference stencil sweeps.  The ex and
// hz sweeps keep the original 16x16 tiling and OpenMP parallelization,
// expressed here with half-open bounds; every cell update within a sweep
// reads only arrays not written by that sweep, so results are identical.
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
  int t;
  int ii;
  for (t = 0; t < tmax; t++) {
    int i;
    int j;
    /* Source excitation along the first row of ey. */
    for (j = 0; j < ny; j++)
      ey[0][j] = _fict_[t];
    /* ey update for rows 1..nx-1. */
    for (i = 1; i < nx; i++)
      for (j = 0; j < ny; j++)
        ey[i][j] = ey[i][j] - 0.5 * (hz[i][j] - hz[i - 1][j]);
    /* ex update for columns 1..ny-1, tiled 16x16. */
#pragma omp parallel for
    for (ii = 0; ii < nx; ii += 16) {
      int r, cc, c;
      for (r = ii; r < (ii + 16 < nx ? ii + 16 : nx); r++)
        for (cc = 1; cc < ny; cc += 16)
          for (c = cc; c < (cc + 16 < ny ? cc + 16 : ny); c++)
            ex[r][c] = ex[r][c] - 0.5 * (hz[r][c] - hz[r][c - 1]);
    }
    /* hz update over the interior (0..nx-2, 0..ny-2), tiled 16x16. */
#pragma omp parallel for
    for (ii = 0; ii < nx - 1; ii += 16) {
      int r, cc, c;
      for (r = ii; r < (ii + 16 < nx - 1 ? ii + 16 : nx - 1); r++)
        for (cc = 0; cc < ny - 1; cc += 16)
          for (c = cc; c < (cc + 16 < ny - 1 ? cc + 16 : ny - 1); c++)
            hz[r][c] = hz[r][c] - 0.69999999999999996 * (ex[r][c + 1] - ex[r][c] + ey[r + 1][c] - ey[r][c]);
    }
  }
}
|
expand1.c | int main() {
  // Each pass lets an OpenMP team atomically increment the shared
  // counters X and Y, then tests the exit condition.
  //
  // Fix: removed the stray `i++;` after the while loop — `i` was never
  // declared, so this file did not compile.
  //
  // NOTE(review): Y starts at 10 and only ever increases, so `Y < 10`
  // can never hold and the loop does not terminate; this looks
  // intentional for the testcase — confirm.
  int X = 0;
  int Y = 10;
  while (1) {
#pragma omp parallel
    {
#pragma omp atomic
      X = X + 1;
#pragma omp atomic
      Y = Y + 1;
    }
    if (Y < 10) {
      break;
    }
  }
  return 0;
}
|
GB_binop__minus_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint32)
// A*D function (colscale): GB (_AxD__minus_uint32)
// D*A function (rowscale): GB (_DxB__minus_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint32)
// C=scalar+B GB (_bind1st__minus_uint32)
// C=scalar+B' GB (_bind1st_tran__minus_uint32)
// C=A+scalar GB (_bind2nd__minus_uint32)
// C=A'+scalar GB (_bind2nd_tran__minus_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
//------------------------------------------------------------------------------
// type and operator macros consumed by the generic templates included below
//------------------------------------------------------------------------------

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_UINT32 || GxB_NO_MINUS_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;  // NOTE(review): unreachable; benign generator artifact
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes these scalars for entries missing in A or B
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__minus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present per the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x - aij) ; \
}

GrB_Info GB (_bind1st_tran__minus_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template inclusion
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij - y) ; \
}

GrB_Info GB (_bind2nd_tran__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
neuralnetwork.c | #include "neuralnetwork.h"
//#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <math.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <limits.h>
#define NEURON_PARALLEL 0
#define NEURON_N_THREADS 8
#define LAYER_PARALLEL 0
#define LAYER_N_THREADS 8
#define INIT_MAX 1.0
#define INIT_MIN -1.0
/*
 * Allocates a neuron with n_dim inputs plus one trailing bias weight and
 * the given activation function.  Each of the n_dim+1 weights is drawn
 * uniformly from [INIT_MIN, INIT_MAX] using the getrandom(2) syscall.
 * Caller owns the result and releases it with delete_neuron().
 */
Neuron * new_neuron(nn_size_t n_dim, nn_float_t (*actv)(nn_float_t))
{
    Neuron * neuron = (Neuron*) malloc(sizeof(Neuron));
    nn_size_t w;

    neuron->actv = actv;
    neuron->n_dim = n_dim;
    neuron->weights = (nn_float_t*) malloc((n_dim+1) * sizeof(nn_float_t));
    for (w=0; w<n_dim+1; w++){
        unsigned long long raw;
        syscall(SYS_getrandom, &raw, sizeof(raw), 0);
        /* map raw in [0, ULLONG_MAX] onto [INIT_MIN, INIT_MAX] */
        neuron->weights[w] = ((raw / (nn_float_t)ULLONG_MAX)*(INIT_MAX-INIT_MIN)) + INIT_MIN;
    }
    return neuron;
}
/* Frees a neuron created by new_neuron(): weight buffer first, then the
   neuron itself. */
void delete_neuron(Neuron * neuron)
{
    free(neuron->weights);
    free(neuron);
}
/*
 * Computes this neuron's activation: actv(net), where net is the dot
 * product of the weights with `input` (length neuron->n_dim) plus the
 * bias stored at weights[n_dim].
 */
nn_float_t neuron_forward(Neuron * neuron, nn_float_t * input)
{
    nn_float_t net = 0.0;
#if NEURON_PARALLEL
    /* Parallel path: each thread accumulates its slice of the dot product
       into `net` under a named critical section. */
#pragma omp parallel num_threads(NEURON_N_THREADS)
    {
        int id = omp_get_thread_num();
        nn_size_t block_size = neuron->n_dim/omp_get_num_threads(), lower_bound = block_size*id, upper_bound = block_size*(id+1);
        /* NOTE(review): the tail-block check uses omp_get_max_threads();
           if the actual team is smaller, the vector tail is skipped —
           confirm omp_get_num_threads() was intended. */
        if (id == omp_get_max_threads()-1) upper_bound = neuron->n_dim;
        nn_float_t aux;
        nn_size_t i;
        for(i=lower_bound; i<upper_bound; i++){
            aux = neuron->weights[i] * input[i];
#pragma omp critical(neuron_sum)
            {
                net += aux;
            }
        }
    }
    net += neuron->weights[neuron->n_dim];
#else
    /* Serial path (NEURON_PARALLEL == 0 by default). */
    nn_size_t i;
    for (i=0; i<neuron->n_dim; i++) net += neuron->weights[i] * input[i];
    net += neuron->weights[neuron->n_dim];
#endif
    return neuron->actv(net);
}
/* Prints each input weight followed by the bias ("Beta") to stdout. */
void print_neuron(Neuron * neuron)
{
    int w;
    for (w=0; w<neuron->n_dim; w++)
        printf("Weight[%d] = %f\n", w, neuron->weights[w]);
    printf("Beta = %f\n", neuron->weights[neuron->n_dim]);
}
/*
 * Allocates a layer of n_neurons neurons, each taking in_size inputs and
 * sharing the activation function actv.  Caller owns the result and
 * releases it with delete_layer().
 */
Layer * new_layer(nn_size_t n_neurons, nn_size_t in_size, nn_float_t (*actv)(nn_float_t))
{
    Layer * layer = (Layer*) malloc(sizeof(Layer));
    int k;

    layer->n_neurons = n_neurons;
    layer->in_size = in_size;
    layer->actv = actv;
    layer->neurons = (Neuron**) malloc(n_neurons*sizeof(Neuron*));
    for (k=0; k<layer->n_neurons; k++)
        layer->neurons[k] = new_neuron(in_size, actv);
    return layer;
}
/* Release a layer created by new_layer, including all of its neurons. */
void delete_layer(Layer * layer)
{
    int k;
    for (k = 0; k < layer->n_neurons; k++) {
        delete_neuron(layer->neurons[k]);
    }
    free(layer->neurons);
    free(layer);
}
/* Run every neuron of the layer on the same input vector.
 * Returns a freshly malloc'd array of n_neurons activations (caller
 * frees), or NULL on allocation failure. */
nn_float_t * layer_forward(Layer * layer, nn_float_t * input)
{
    nn_float_t * output = malloc(layer->n_neurons * sizeof *output);
    if (output == NULL) return NULL;
    int i;
#if LAYER_PARALLEL
    /* Neurons are independent, so the loop parallelizes trivially. */
#pragma omp parallel for private(i) num_threads(LAYER_N_THREADS)
#endif
    for (i = 0; i < layer->n_neurons; i++)
        output[i] = neuron_forward(layer->neurons[i], input);
    return output;
}
/* Pretty-print every neuron in the layer, one banner per neuron. */
void print_layer(Layer * layer)
{
    int k;
    for (k = 0; k < layer->n_neurons; k++) {
        printf("---Neuron[%d]---\n", k);
        print_neuron(layer->neurons[k]);
    }
}
/* Assemble a feed-forward network of n_layers layers.
 * layers_sizes[i] gives layer i's neuron count and layers_actvs[i] its
 * activation; layer 0 reads in_size inputs, each later layer reads the
 * previous layer's outputs. Requires n_layers >= 1 (layer 0 is built
 * unconditionally). Returns NULL on invalid arguments or allocation
 * failure; caller owns the result (release with delete_network). */
Network * new_network(nn_size_t n_layers, nn_size_t * layers_sizes, nn_float_t (**layers_actvs)(nn_float_t), nn_size_t in_size)
{
    if (n_layers < 1) return NULL; /* layers[0] is accessed unconditionally */
    Network * network = malloc(sizeof *network);
    if (network == NULL) return NULL;
    network->n_layers = n_layers;
    network->in_size = in_size;
    network->layers = malloc(network->n_layers * sizeof *network->layers);
    if (network->layers == NULL) {
        free(network);
        return NULL;
    }
    nn_size_t i;
    network->layers[0] = new_layer(layers_sizes[0], network->in_size, layers_actvs[0]);
    for (i = 1; i < network->n_layers; i++)
        network->layers[i] = new_layer(layers_sizes[i], network->layers[i-1]->n_neurons, layers_actvs[i]);
    return network;
}
/* Release a network created by new_network (or copy_network),
 * including every layer it owns. */
void delete_network(Network * network)
{
    int k;
    for (k = 0; k < network->n_layers; k++) {
        delete_layer(network->layers[k]);
    }
    free(network->layers);
    free(network);
}
/* Deep-copy a network: rebuild an identical topology with new_network
 * (which randomizes weights), then overwrite the fresh weights with
 * memcpy from the source. Caller owns the returned copy. */
Network * copy_network(Network * cur_network)
{
    int i, j;
    /* Temporary per-layer descriptors used to rebuild the topology. */
    nn_size_t * layers_sizes = (nn_size_t*) malloc(cur_network->n_layers*sizeof(nn_size_t));
    nn_float_t (**layers_actvs)(nn_float_t) = (nn_float_t (**)(nn_float_t)) malloc(cur_network->n_layers*sizeof(nn_float_t (*)(nn_float_t)));
    for (i=0; i<cur_network->n_layers; i++){
        layers_sizes[i] = cur_network->layers[i]->n_neurons;
        layers_actvs[i] = cur_network->layers[i]->actv;
    }
    Network * network = new_network(cur_network->n_layers, layers_sizes, layers_actvs, cur_network->in_size);
    /* Copy every neuron's weight vector (n_dim inputs + 1 bias). */
    for (i=0; i<cur_network->n_layers; i++){
        for (j=0; j<cur_network->layers[i]->n_neurons; j++){
            memcpy(
                network->layers[i]->neurons[j]->weights,
                cur_network->layers[i]->neurons[j]->weights,
                (cur_network->layers[i]->neurons[j]->n_dim+1)*sizeof(nn_float_t)
            );
        }
    }
    free(layers_sizes);
    free(layers_actvs);
    return network;
}
/* Forward-propagate an input vector through every layer.
 * Returns a malloc'd array with the last layer's activations (caller
 * frees); intermediate layer outputs are freed as propagation proceeds.
 * BUGFIX: the old code returned next_vect, which is still NULL for a
 * single-layer network (the loop never runs), leaking cur_vect and
 * returning NULL. Returning cur_vect is correct in every case: it
 * always points at the most recently computed layer output. */
nn_float_t * network_forward(Network * network, nn_float_t * in)
{
    int i;
    nn_float_t * cur_vect = layer_forward(network->layers[0], in);
    for (i = 1; i < network->n_layers; i++) {
        nn_float_t * next_vect = layer_forward(network->layers[i], cur_vect);
        free(cur_vect);
        cur_vect = next_vect;
    }
    return cur_vect;
}
/* Pretty-print every layer of the network, one banner per layer. */
void print_network(Network * network)
{
    int k;
    for (k = 0; k < network->n_layers; k++) {
        printf("------Layer[%d]------\n", k);
        print_layer(network->layers[k]);
    }
}
/* Rectified linear unit: max(net, 0). */
nn_float_t relu(nn_float_t net)
{
    return (net >= 0.0) ? net : 0.0;
}
/* Softplus activation: log(1 + exp(net)).
 * Computed in overflow-safe form: for large positive net, exp(net)
 * overflows to +inf, so use the identity
 *   log(1 + exp(x)) = x + log1p(exp(-x))   for x > 0,
 * and log1p(exp(x)) otherwise (log1p is also more accurate than
 * log(1 + y) when exp(net) is tiny). */
nn_float_t soft_relu(nn_float_t net)
{
    if (net > 0.0)
        return net + log1p(exp(-net));
    return log1p(exp(net));
}
/* Heaviside step: 1.0 when net >= 0, otherwise 0.0. */
nn_float_t step(nn_float_t net)
{
    return (net >= 0.0) ? 1.0 : 0.0;
}
/* Logistic sigmoid: 1 / (1 + e^(-net)), mapping R onto (0, 1). */
nn_float_t sigm(nn_float_t net)
{
    nn_float_t denom = 1.0 + exp(-net);
    return 1.0 / denom;
}
/* Identity activation: returns the net input unchanged. */
nn_float_t linear(nn_float_t net)
{
return net;
} |
omp_dsymm_batch.c | /**
* @file omp_dsymm_batch.c
*
* @brief BBLAS omp_dsymm_batch double routine.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @generated from ./bblas_omp/omp_zsymm_batch.c normal z -> d, Mon Jun 6 09:44:14 2016
**/
#endif
#include<cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define REAL
/**
Purpose
-------
<b>dsymm_batch</b> is an OpenMP version of dsymm_batch.
It performs one of the matrix-matrix operations
arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i], or
arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i],
where alpha[i] and beta[i] are scalars, arrayA[i] is a symmetric matrix
and arrayB[i] and arrayC[i] are M[i] by N[i] matrices.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayB, arrayC, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayB, arrayC, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of side[0], uplo[0], M[0], N[0],
alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations.
Parameters
----------
@param[in]
side Array of <tt>enum BBLAS_SIDE</tt>.
Each element side[i] specifies whether the symmetric
matrix arrayA[i] appears on the left or right side of the
operation as follows:
- = 'BblasLeft' arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i].
- = 'BblasRight' arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i].
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the upper or
lower triangular part of the symmetric matrix
arrayA[i] is to be referenced as follows:
- = 'BblasUpper' Only the upper triangular part of
arrayA[i] is to be referenced.
- = 'BblasLower' Only the lower triangular part of
arrayA[i] is to be referenced.
@param[in]
M Array of <tt>int</tt>.
Each element M[i] specifies the number of rows of the matrix arrayC[i].
M[i] must be greater than zero.
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of columns of the matrix arrayC[i].
N[i] must be greater than zero.
@param[in]
alpha Array of <tt>double</tt>.
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a DOUBLE PRECISION matrix of
dimension lda[i] by Ka[i],
where Ka[i] = M[i] when side[i] = BblasLeft and is N[i] otherwise.
When using side[i] = BblasLeft the M[i] by M[i] part of arrayA[i]
must contain the symmetric matrix:
when uplo[i] = BblasUpper, the upper triangular part of arrayA[i]
must contain the upper triangular part of the symmetric matrix whilst
the strictly lower triangular part is not used;
similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i]
must contain the lower triangular part of the symmetric matrix
whilst the strictly upper triangular part is not used.
When using side[i] = BblasRight the N[i] by N[i] part of arrayA[i] must
contain the symmetric matrix:
when uplo[i] = BblasUpper, the upper triangular part of arrayA[i]
must contain the upper triangular part of the symmetric matrix whilst
the strictly lower triangular part is not used;
similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i]
must contain the lower triangular part of the symmetric matrix
whilst the strictly upper triangular part is not used.
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When side[i] = BblasLeft
then lda[i] must be at least max( 1, M[i] ),
otherwise lda[i] must be at least max( 1, N[i] ).
@param[in]
arrayB Array of pointers.
Each element arrayB[i] is a pointer to a DOUBLE PRECISION matrix of
dimension ldb[i] by N[i].
The leading M[i] by N[i] part of arrayB[i] must contain the matrix elements.
@param[in]
ldb Array of <tt>int</tt>.
Each element ldb[i] specifies the first dimension of arrayB[i] as declared
in the calling (sub) program. Each element ldb[i] must be at least max( 1, M[i] ).
@param[in]
beta Array of <tt>double</tt>.
When beta[i] is set to zero arrayC[i] need not be set on input.
@param[in,out]
arrayC Array of pointers.
Each element arrayC[i] is a pointer to a DOUBLE PRECISION matrix of
dimension ldc[i] by N[i].
Before entry, the leading M[i] by N[i] part of the arrayC[i] must
contain a matrix C, except when beta is zero, in which
case C need not be set on entry.
On exit, the matrix arrayC[i] is overwritten by the M[i] by N[i] matrix output.
@param[in]
ldc Array of <tt>int</tt>.
Each element ldc[i] specifies the first dimension of arrayC[i] as declared
in the calling (sub) program. The value ldc[i] must be at least
max( 1, M[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith dsymm in the batch,
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
void omp_dsymm_batch(
    const enum BBLAS_SIDE *side, const enum BBLAS_UPLO *uplo,
    const int *M, const int *N, const double *alpha,
    const double **arrayA, const int *lda,
    const double **arrayB, const int *ldb,
    const double *beta, double **arrayC,
    const int *ldc, const int batch_count, const enum BBLAS_OPTS batch_opts, int *info)
{
    /* Local variables */
    int first_index = 0;
    int batch_iter;
    int LDA;
    char func_name[15] = "dsymm_batch";

    /* Check input arguments */
    if (batch_count < 0)
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
        /* BUGFIX: execution previously fell through after reporting an
         * invalid batch_count; the info array length cannot be trusted
         * here, so stop immediately. */
        return;
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* FIXED batch: element 0's parameters describe every problem, so
         * validate them once and flag the whole batch on failure. */
        if ((side[first_index] != BblasLeft) &&
            (side[first_index] != BblasRight))
        {
            xerbla_batch(func_name, BBLAS_ERR_SIDE, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_SIDE;
            }
            return;
        }
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower))
        {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if (M[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_M, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_M;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        /* A is M x M when it multiplies from the left, N x N otherwise. */
        if (side[first_index] == BblasLeft)
        {
            LDA = M[first_index];
        } else
        {
            LDA = N[first_index];
        }
        if (lda[first_index] < LDA)
        {
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldb[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDB;
            }
            return;
        }
        if (ldc[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDC;
            }
            return;
        }
        /* Quick return: empty matrices, or alpha == 0 with beta == 1,
         * leave every C unchanged. */
        if (M[first_index] == 0 || N[first_index] == 0 ||
            (alpha[first_index] == (double)0.0 &&
             beta[first_index] == (double)1.0))
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
        /* The problems are independent: one cblas_dsymm per iteration. */
#pragma omp parallel for private(batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Call to cblas_dsymm */
            cblas_dsymm(
                BblasColMajor,
                side[first_index],
                uplo[first_index],
                M[first_index],
                N[first_index],
                (alpha[first_index]),
                arrayA[batch_iter],
                lda[first_index],
                arrayB[batch_iter],
                ldb[first_index],
                (beta[first_index]),
                arrayC[batch_iter],
                ldc[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /* END FIXED SIZE FOR LOOP */
    } else if (batch_opts == BBLAS_VARIABLE)
    {
        /* VARIABLE batch: each problem is validated and dispatched
         * independently; a bad element is flagged in info and skipped. */
#pragma omp parallel for private(batch_iter, LDA)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Check input arguments */
            if ((side[batch_iter] != BblasLeft) &&
                (side[batch_iter] != BblasRight))
            {
                xerbla_batch(func_name, BBLAS_ERR_SIDE, batch_iter);
                info[batch_iter] = BBLAS_ERR_SIDE;
                continue;
            }
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower))
            {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if (M[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_M, batch_iter);
                info[batch_iter] = BBLAS_ERR_M;
                continue;
            }
            if (N[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            if (side[batch_iter] == BblasLeft)
            {
                LDA = M[batch_iter];
            } else
            {
                LDA = N[batch_iter];
            }
            if (lda[batch_iter] < LDA)
            {
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            if (ldb[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDB;
                continue;
            }
            if (ldc[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDC;
                continue;
            }
            /* Quick "return" for this element only. */
            if (M[batch_iter] == 0 || N[batch_iter] == 0 ||
                (alpha[batch_iter] == (double)0.0 &&
                 beta[batch_iter] == (double)1.0))
            {
                info[batch_iter] = BBLAS_SUCCESS;
                continue;
            }
            cblas_dsymm(
                BblasColMajor,
                side[batch_iter],
                uplo[batch_iter],
                M[batch_iter],
                N[batch_iter],
                (alpha[batch_iter]),
                arrayA[batch_iter],
                lda[batch_iter],
                arrayB[batch_iter],
                ldb[batch_iter],
                (beta[batch_iter]),
                arrayC[batch_iter],
                ldc[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    } else
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef REAL
|
LinearSearch.c | #include "stdio.h"
#include "stdlib.h"
#include "omp.h"
#include "mpi.h"
FILE *fp;
void printArray(int *arr, int size);
/* Sequential scan for x in arr[0..size-1].
 * Returns the first matching index, or -1 if x is absent. */
int linearSearch(int x, int *arr, int size) {
    int idx = 0;
    while (idx < size) {
        if (arr[idx] == x) {
            return idx;
        }
        idx++;
    }
    return -1;
}
/* OpenMP linear search for x in arr[0..size-1]; returns the first
 * matching index, or -1 if absent.
 * Each thread records the first match inside its share of the loop;
 * the minimum over all threads is the global first match, because
 * static scheduling hands each thread a contiguous ascending range.
 * BUGFIX: the old code called omp_get_num_threads() outside any
 * parallel region (always 1) and used orphaned "#pragma omp for"
 * directives, so the search actually ran sequentially. The loop is now
 * inside a real parallel region and the per-thread slots are sized by
 * omp_get_max_threads(). Also removes the unused `itr` variable and
 * checks the allocation. */
int linearSearchOMP(int x, int *arr, int size) {
    int i, minIndex, threads = omp_get_max_threads();
    int *found = (int *)malloc(threads * sizeof(int));
    if (found == NULL)
        return -1;
    for (i = 0; i < threads; i++) {
        found[i] = -1;
    }
#pragma omp parallel private(i)
    {
#pragma omp for schedule(static)
        for (i = 0; i < size; i++) {
            if (arr[i] == x) {
                if (found[omp_get_thread_num()] == -1)
                    found[omp_get_thread_num()] = i;
            }
        }
    }
    /* Reduce: smallest per-thread hit is the global first occurrence. */
    minIndex = size;
    for (i = 0; i < threads; i++) {
        if (found[i] != -1 && found[i] < minIndex)
            minIndex = found[i];
    }
    free(found);
    return (minIndex == size) ? -1 : minIndex;
}
/* MPI linear search: every rank scans its contiguous slice of arr (the
 * whole array is assumed addressable on all ranks) and rank 0 gathers
 * the per-rank first-match indices.
 * Returns, on rank 0: the smallest matching index or -1 if not found;
 * returns -2 on every other rank so callers know they hold no result.
 * Calls MPI_Init/MPI_Finalize itself, so it can run only once per process.
 * NOTE(review): when size % np != 0 the trailing size - np*localsize
 * elements are searched by no rank — confirm size is always a multiple
 * of the communicator size. */
int linearSearchMPI(int argc, char *argv[], int x, int *arr, int size) {
    int rank, np, localsize, i, minIndex;
    int *localdata, *found, *results; /* localdata is only used by the commented-out Scatter variant below */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    localsize = size / np;
    found = (int *)malloc(1 * sizeof(int));
    results = (int *)malloc(np * sizeof(int));
    found[0] = -1;
    /* Each rank scans its own global index range, so found[0] is
     * already a global index (no offset correction needed). */
    for (i = rank * localsize; i < (rank + 1)*localsize; i++) {
        if (arr[i] == x) {
            found[0] = i;
            break;
        }
    }
    MPI_Gather(found, 1, MPI_INT, results, 1, MPI_INT, 0, MPI_COMM_WORLD);
    /*
    localsize = size / np;
    localdata = (int *)malloc(localsize * sizeof(int));
    found = (int *)malloc(1 * sizeof(int));
    results = (int *)malloc(np * sizeof(int));
    MPI_Scatter(arr, localsize, MPI_INT, localdata, localsize, MPI_INT, 0, MPI_COMM_WORLD);
    found[0] = -1;
    for (i = 0; i < localsize; i++) {
    if (localdata[i] == x) {
    found[0] = i;
    break;
    }
    }
    MPI_Gather(found, 1, MPI_INT, results, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if(rank == 0)
    printArray(results, np);*/
    if (rank == 0) {
        /* Smallest per-rank hit is the global first occurrence. */
        minIndex = size;
        for (i = 0; i < np; i++) {
            if (results[i] != -1) {
                if (results[i] < minIndex) minIndex = results[i];
            }
        }
        if (minIndex == size)minIndex = -1;
    }
    else minIndex = -2; /* sentinel: "this rank has no result" */
    free(found);
    free(results);
    MPI_Finalize();
    return minIndex;
}
/* Fill arr[0..size-1] with values from rand() (global seed state). */
void randomize(int *arr, int size) {
    int k;
    for (k = 0; k < size; k++)
        arr[k] = rand();
}
/* Return a random element drawn from the upper half of arr
 * (indices [size/2, size)); requires size >= 2. */
int pickRandElem(int *arr, int size) {
    int pos = size / 2 + rand() % (size / 2);
    return arr[pos];
}
/* Print the elements of arr separated by spaces, ending with a newline. */
void printArray(int *arr, int size) {
    int k;
    for (k = 0; k < size; k++)
        printf("%d ", arr[k]);
    printf("\n");
}
/* Check that arr[index] really holds elem; returns 1 on match, else 0.
 * BUGFIX: the search functions return -1 for "not found", and the old
 * code dereferenced arr[-1] in that case (out-of-bounds read). Negative
 * indices are now rejected before touching the array. */
int is_correct(int elem, int index, int *arr) {
    if (index < 0) {
        printf("Element was not found (index %d)\n", index);
        return 0;
    }
    if (arr[index] == elem) return 1;
    printf("Returned this element: %d\n", arr[index]);
    return 0;
}
/* Driver: argv[1] selects the implementation (0 = sequential,
 * 1 = OpenMP, 2 = MPI) and argv[2] the array size; results and timing
 * are appended to LinearSearch.txt.
 * NOTE(review): `index`, `start` and `end` remain uninitialized when
 * argv[1] is not 0/1/2, and fp is used without a NULL check — confirm
 * callers only ever pass 0, 1 or 2 and the file is writable. */
int main(int argc, char *argv[]) {
    if (argc != 3) {
        //printf("Must specify size of array.\n");
        return 0;
    }
    fp = fopen("LinearSearch.txt", "a");
    int size = atoi(argv[2]), oper = atoi(argv[1]), randElem, index;
    int *arr = (int *)malloc(size * sizeof(int));
    double start, end;
    randomize(arr, size);
    /* Pick a value known to be present (taken from the upper half). */
    randElem = pickRandElem(arr, size);
    if (oper == 2) {
        /* NOTE(review): timer starts before MPI_Init (inside
         * linearSearchMPI), so MPI startup cost is included — confirm
         * this is intended. */
        start = omp_get_wtime();
        index = linearSearchMPI(argc, argv, randElem, arr, size);
        /* -2 marks a non-root MPI rank: nothing to report there. */
        if (index == -2) {
            free(arr);
            return 0;
        }
        end = omp_get_wtime();
        fprintf(fp, "MPI Linear Search\n");
    }
    else if (oper == 1) {
        start = omp_get_wtime();
        index = linearSearchOMP(randElem, arr, size);
        end = omp_get_wtime();
        fprintf(fp, "OpenMP Linear Search\n");
    }
    else if (oper == 0) {
        start = omp_get_wtime();
        index = linearSearch(randElem, arr, size);
        end = omp_get_wtime();
        fprintf(fp, "Sequential Linear Search\n");
    }
    fprintf(fp, "Element: %d -> Index: %d\n", randElem, index);
    fprintf(fp, "Time elapsed: %f\n", (end - start));
    if (is_correct(randElem, index, arr))fprintf(fp, "Correct\n");
    else fprintf(fp, "Incorrect\n");
    free(arr);
    return 0;
} |
fci_contract_nosym.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*
* Paticle permutation symmetry for 2e Hamiltonian only
* h2e[i,j,k,l] == h2e[k,l,i,j]
* h2e[i,j,k,l] =/= h2e[j,i,k,l] =/= h2e[i,j,l,k] ...
*/
#include <stdlib.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "np_helper/np_helper.h"
#include "fci.h"
#define CSUMTHR 1e-28
#define STRB_BLKSIZE 112
double FCI_t1ci_sf(double *ci0, double *t1, int bcount,
int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb);
/* One-electron contraction over the alpha strings (no h1e symmetry
 * assumed): for every alpha string str0 and each excitation (a,i) in
 * its compressed link table, the entire beta column of ci0 at row str0
 * is scaled by sign * h1e[a*norb+i] and accumulated into row str1 of
 * ci1. ci0/ci1 are nstra x nstrb, row-major. */
void FCIcontract_a_1e_nosym(double *h1e, double *ci0, double *ci1,
                            int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                            int *link_indexa, int *link_indexb)
{
    int j, k, i, a, sign;
    size_t str0, str1;
    double *pci0, *pci1;
    double tmp;
    _LinkT *tab;
    /* Pack the raw link table into the compact _LinkT form once. */
    _LinkT *clink = malloc(sizeof(_LinkT) * nlinka * nstra);
    FCIcompress_link(clink, link_indexa, norb, nstra, nlinka);
    for (str0 = 0; str0 < nstra; str0++) {
        tab = clink + str0 * nlinka;
        for (j = 0; j < nlinka; j++) {
            a = EXTRACT_CRE (tab[j]); // propagate from t1 to bra, through a^+ i
            i = EXTRACT_DES (tab[j]);
            str1 = EXTRACT_ADDR(tab[j]);
            sign = EXTRACT_SIGN(tab[j]);
            pci0 = ci0 + str0 * nstrb;
            pci1 = ci1 + str1 * nstrb;
            tmp = sign * h1e[a*norb+i];
            /* The excitation acts only on the alpha index, so the whole
             * beta vector is updated with one scalar factor. */
            for (k = 0; k < nstrb; k++) {
                pci1[k] += tmp * pci0[k];
            }
        }
    }
    free(clink);
}
/* One-electron contraction over the beta strings (no h1e symmetry
 * assumed): for each alpha row str0, every beta string k spreads its
 * coefficient ci0[str0, k] into the linked beta strings str1 of the
 * same row, scaled by sign * h1e[a*norb+i]. */
void FCIcontract_b_1e_nosym(double *h1e, double *ci0, double *ci1,
                            int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                            int *link_indexa, int *link_indexb)
{
    int j, k, i, a, sign;
    size_t str0, str1;
    double *pci1;
    double tmp;
    _LinkT *tab;
    /* Pack the raw link table into the compact _LinkT form once. */
    _LinkT *clink = malloc(sizeof(_LinkT) * nlinkb * nstrb);
    FCIcompress_link(clink, link_indexb, norb, nstrb, nlinkb);
    for (str0 = 0; str0 < nstra; str0++) {
        pci1 = ci1 + str0 * nstrb; /* row of ci1 for this alpha string */
        for (k = 0; k < nstrb; k++) {
            tab = clink + k * nlinkb;
            tmp = ci0[str0*nstrb+k];
            for (j = 0; j < nlinkb; j++) {
                a = EXTRACT_CRE (tab[j]);
                i = EXTRACT_DES (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                pci1[str1] += sign * tmp * h1e[a*norb+i];
            }
        }
    }
    free(clink);
}
/* Scatter t1 contributions into ci1 through the alpha-string links:
 * for every excitation (a,i) of alpha string stra_id, add (with sign)
 * the t1 entries at column a*norb+i of each of the bcount beta slots
 * into row str1 of ci1. t1 is laid out as bcount consecutive
 * norb*norb blocks, hence the k*nnorb stride. At the call site ci1 is
 * the thread-private buffer and nstrb its row stride (ncol_ci1buf). */
static void spread_a_t1(double *ci1, double *t1,
                        int bcount, int stra_id, int strb_id,
                        int norb, int nstrb, int nlinka, _LinkT *clink_indexa)
{
    ci1 += strb_id; /* shift to the current beta block */
    const int nnorb = norb * norb;
    int j, k, i, a, str1, sign;
    const _LinkT *tab = clink_indexa + stra_id * nlinka;
    double *cp0, *cp1;
    for (j = 0; j < nlinka; j++) {
        a = EXTRACT_CRE (tab[j]);
        i = EXTRACT_DES (tab[j]);
        str1 = EXTRACT_ADDR(tab[j]);
        sign = EXTRACT_SIGN(tab[j]);
        cp0 = t1 + a*norb+i; // propagate from t1 to bra, through a^+ i
        cp1 = ci1 + str1*(size_t)nstrb;
        /* sign is +/-1; branch once instead of multiplying per element. */
        if (sign > 0) {
            for (k = 0; k < bcount; k++) {
                cp1[k] += cp0[k*nnorb];
            }
        } else {
            for (k = 0; k < bcount; k++) {
                cp1[k] -= cp0[k*nnorb];
            }
        }
    }
}
/* Scatter t1 contributions into ci1 through the beta-string links:
 * for each of the bcount beta strings starting at strb_id, every
 * excitation (a,i) adds sign * t1[a*norb+i] into the linked beta
 * position str1 of the row belonging to alpha string stra_id. t1
 * advances by one norb*norb block per beta string. */
static void spread_b_t1(double *ci1, double *t1,
                        int bcount, int stra_id, int strb_id,
                        int norb, int nstrb, int nlinkb, _LinkT *clink_indexb)
{
    const int nnorb = norb * norb;
    int j, i, a, str0, str1, sign;
    const _LinkT *tab = clink_indexb + strb_id * nlinkb;
    double *pci = ci1 + stra_id * (size_t)nstrb; /* row for this alpha string */
    for (str0 = 0; str0 < bcount; str0++) {
        for (j = 0; j < nlinkb; j++) {
            a = EXTRACT_CRE (tab[j]);
            i = EXTRACT_DES (tab[j]);
            str1 = EXTRACT_ADDR(tab[j]);
            sign = EXTRACT_SIGN(tab[j]);
            // propagate from t1 to bra, through a^+ i
            pci[str1] += sign * t1[a*norb+i];
        }
        t1 += nnorb;   /* next beta string's t1 block */
        tab += nlinkb; /* next beta string's link table */
    }
}
/* Two-electron kernel for one (stra_id, beta-block) tile:
 *   t1  <- FCI_t1ci_sf(ci0, ...)   transition amplitudes for the tile
 *   vt1 <- eri . t1                dense dgemm, (nnorb x nnorb)*(nnorb x bcount)
 * then vt1 is scattered back: into ci1 through the beta links and into
 * the caller's thread-private ci1buf through the alpha links.
 * The dgemm and spreads are skipped entirely when the tile's weight
 * (csum, returned by FCI_t1ci_sf) is below CSUMTHR.
 * t1buf must provide at least 2 * nnorb * bcount doubles (t1 + vt1). */
static void ctr_rhf2e_kern(double *eri, double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount_for_spread_a, int ncol_ci1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkT *clink_indexa, _LinkT *clink_indexb)
{
    const char TRANS_N = 'N';
    const double D0 = 0;
    const double D1 = 1;
    const int nnorb = norb * norb;
    double *t1 = t1buf;
    double *vt1 = t1buf + nnorb*bcount; /* second half of the scratch buffer */
    double csum;
    csum = FCI_t1ci_sf(ci0, t1, bcount, stra_id, strb_id,
                       norb, na, nb, nlinka, nlinkb,
                       clink_indexa, clink_indexb);
    if (csum > CSUMTHR) {
        dgemm_(&TRANS_N, &TRANS_N, &nnorb, &bcount, &nnorb,
               &D1, eri, &nnorb, t1, &nnorb,
               &D0, vt1, &nnorb);
        spread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                    norb, nb, nlinkb, clink_indexb);
        spread_a_t1(ci1buf, vt1, bcount_for_spread_a, stra_id, 0,
                    norb, ncol_ci1buf, nlinka, clink_indexa);
    }
}
/* Row-wise accumulate: out[i][0..ni) += in[i][0..ni) for count rows,
 * where out rows are strided by no and in rows by ni. */
static void axpy2d(double *out, double *in, int count, int no, int ni)
{
    int row, col;
    for (row = 0; row < count; row++) {
        double *dst = out + row * no;
        double *src = in + row * ni;
        for (col = 0; col < ni; col++) {
            dst[col] += src[col];
        }
    }
}
/* Two-electron contraction ci1 = eri * ci0 (eri is the full
 * nnorb x nnorb integral matrix; per the file header only
 * h2e[i,j,k,l] == h2e[k,l,i,j] particle symmetry is assumed).
 * Work is tiled over beta strings in blocks of STRB_BLKSIZE; within a
 * tile the alpha strings are shared across the OpenMP team, each thread
 * accumulating alpha-spread contributions into a private ci1buf which
 * is then merged into ci1 under a critical section. */
void FCIcontract_2es1(double *eri, double *ci0, double *ci1,
                      int norb, int na, int nb, int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
    _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na);
    _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb);
    FCIcompress_link(clinka, link_indexa, norb, na, nlinka);
    FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb);
    NPdset0(ci1, ((size_t)na) * nb); /* output is accumulated into; clear first */
#pragma omp parallel default(none) \
        shared(eri, ci0, ci1, norb, na, nb, nlinka, nlinkb, \
               clinka, clinkb)
    {
        int strk, ib, blen;
        /* Per-thread scratch: t1+vt1 workspace and a private ci1 tile. */
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*norb*2+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
            blen = MIN(STRB_BLKSIZE, nb-ib);
            NPdset0(ci1buf, ((size_t)na) * blen);
#pragma omp for schedule(static)
            for (strk = 0; strk < na; strk++) {
                ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                               blen, blen, blen, strk, ib,
                               norb, na, nb, nlinka, nlinkb,
                               clinka, clinkb);
            }
            /* The omp-for's implicit barrier makes ci1buf complete here;
             * merge it serially into the shared result. */
#pragma omp critical
            axpy2d(ci1+ib, ci1buf, na, nb, blen);
            /* All merges must finish before any thread resets ci1buf
             * for the next beta block. */
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
    }
    free(clinka);
    free(clinkb);
}
|
omp_for_schedule_dynamic.c | // RUN: %libomp-compile-and-run
/*
* Test for dynamic scheduling with chunk size
* Method: calculate how many times the iteration space is dispatched
* and judge if each dispatch has the requested chunk size
* unless it is the last one.
* It is possible for two adjacent chunks are assigned to the same thread
* Modified by Chunhua Liao
*/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#define CFDMAX_SIZE 100
const int chunk_size = 7;
/* Verify OpenMP dynamic scheduling with an explicit chunk size.
 * The parallel loop records which thread handled each iteration; runs
 * of equal thread ids are then measured. Every dispatch except the
 * final one must be a multiple of chunk_size (two adjacent chunks may
 * land on the same thread), and the final run must match the remainder
 * CFDMAX_SIZE % chunk_size. Returns 1 on pass, 0 on failure.
 * BUGFIX: the dispatch-size array `tmp` leaked on every call; it is
 * now freed (and its allocation checked). The unused outer `tid`
 * variable and its private() clause were also removed. */
int test_omp_for_schedule_dynamic()
{
    int *tids;
    int i;
    int tidsArray[CFDMAX_SIZE];
    int count = 0;
    int tmp_count = 0; /* dispatch times */
    int *tmp;          /* store chunk size for each dispatch */
    int result = 0;
    tids = tidsArray;

#pragma omp parallel shared(tids)
    { /* begin of parallel */
        int tid = omp_get_thread_num ();
#pragma omp for schedule(dynamic,chunk_size)
        for (i = 0; i < CFDMAX_SIZE; i++) {
            tids[i] = tid;
        }
    }

    /* Count boundaries between runs of identical thread ids. */
    for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
        if (tids[i] != tids[i + 1]) {
            count++;
        }
    }
    tmp = (int *) malloc (sizeof (int) * (count + 1));
    if (tmp == NULL) {
        return 0;
    }
    tmp[0] = 1;
    /* Measure the length of each run. */
    for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
        if (tmp_count > count) {
            printf ("--------------------\nTestinternal Error: List too small!!!\n--------------------\n"); /* Error handling */
            break;
        }
        if (tids[i] != tids[i + 1]) {
            tmp_count++;
            tmp[tmp_count] = 1;
        } else {
            tmp[tmp_count]++;
        }
    }
    /* is dynamic statement working? */
    for (i = 0; i < count; i++) {
        if ((tmp[i]%chunk_size)!=0) {
            /* it is possible for 2 adjacent chunks assigned to a same thread */
            result++;
            fprintf(stderr,"The intermediate dispatch has wrong chunksize.\n");
        }
    }
    if ((tmp[count]%chunk_size)!=(CFDMAX_SIZE%chunk_size)) {
        result++;
        fprintf(stderr,"the last dispatch has wrong chunksize.\n");
    }
    free(tmp); /* BUGFIX: previously leaked on every repetition */
    return (result==0);
}
/* Run the dynamic-schedule test REPETITIONS times; the exit code is
 * the number of failed repetitions (0 means all passed). */
int main()
{
    int num_failed = 0;
    int rep;
    for (rep = 0; rep < REPETITIONS; rep++) {
        if (!test_omp_for_schedule_dynamic()) {
            num_failed++;
        }
    }
    return num_failed;
}
|
random.c | /******************************************************************************
* *
* RANDOM.C *
* *
* WRAPPERS FOR RANDOM NUMBER GENERATOR *
* *
******************************************************************************/
#include "decs.h"
static gsl_rng **rng;
// Use Mersenne twister
/* Allocate one GSL Mersenne-twister generator per OpenMP thread and
 * seed each stream with seed + thread id, so concurrent callers of
 * get_rand()/get_chisq()/... never share generator state.
 * NOTE(review): assumes the global `nthreads` (defined elsewhere) is
 * at least the OpenMP team size used here — confirm. */
void init_random(int seed) {
  rng = safe_malloc(nthreads * sizeof(gsl_rng *));
#pragma omp parallel
  {
    rng[omp_get_thread_num()] = gsl_rng_alloc(gsl_rng_mt19937);
    gsl_rng_set(rng[omp_get_thread_num()], seed + omp_get_thread_num());
  }
}
double get_rand() { return gsl_rng_uniform(rng[omp_get_thread_num()]); }
double get_chisq(double nu) {
return gsl_ran_chisq(rng[omp_get_thread_num()], nu);
}
void get_ran_dir_3d(double *nx, double *ny, double *nz) {
gsl_ran_dir_3d(rng[omp_get_thread_num()], nx, ny, nz);
}
/* Gaussian sample with mean mu and standard deviation sigma: GSL draws
 * a zero-mean deviate, which is shifted by mu. */
double get_gaussian(double mu, double sigma) {
  return mu + gsl_ran_gaussian(rng[omp_get_thread_num()], sigma);
}
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal
  * Stores or retrieves Eigen's max-thread setting.
  * SetAction: remember *v as the user-requested thread count.
  * GetAction: write the effective count to *v — the stored value when
  * positive, otherwise omp_get_max_threads() (or 1 without OpenMP). */
inline void manage_multi_threading(Action action, int* v)
{
  // Function-local static keeps the setting across calls; -1 means "unset".
  static int m_maxThreads = -1;
  EIGEN_UNUSED_VARIABLE(m_maxThreads);

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}
}
/** Must be called first when calling Eigen from multiple threads.
  * Touches the thread-count and cache-size settings so the underlying
  * function-local statics are initialized before concurrent use. */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the max number of threads reserved for Eigen
  * (the user-set value, or the OpenMP default when unset)
  * \sa setNbThreads */
inline int nbThreads()
{
  int count;
  internal::manage_multi_threading(GetAction, &count);
  return count;
}
/** Sets the max number of threads reserved for Eigen.
  * \note A value <= 0 restores the default: omp_get_max_threads() when
  * OpenMP is enabled, otherwise 1 (see manage_multi_threading).
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}
namespace internal {
/** \internal Per-thread coordination record for a parallel GEMM:
  * \c sync and \c users are written concurrently by several threads
  * (hence volatile); \c lhs_start / \c lhs_length describe the slice
  * of lhs rows owned by the thread (set in parallelize_gemm). */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  Index volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};
/** \internal Runs \a func over the rows x cols product space, splitting
  * the work across OpenMP threads when the heuristics say it is worth
  * it; otherwise a single sequential call is made. The per-thread
  * GemmParallelInfo array publishes each thread's lhs row slice so the
  * workers can coordinate. */
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redisigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else
  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000;  // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  // Exceptions thrown inside the parallel region are counted (not
  // propagated) and rethrown as a single exception after the join.
  int errorCount = 0;
  #pragma omp parallel num_threads(threads) reduction(+: errorCount)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of request ones.
    Index actual_threads = omp_get_num_threads();

    // Column blocks rounded down to a multiple of 4, row blocks to a
    // multiple of the kernel's mr; the last thread takes the remainder.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    EIGEN_TRY {
      if(transpose) func(c0, actualBlockCols, 0, rows, info);
      else func(0, rows, c0, actualBlockCols, info);
    } EIGEN_CATCH(...) {
      ++errorCount;
    }
  }
  if (errorCount) EIGEN_THROW_X(Eigen::eigen_assert_exception());
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  /// \brief Kind of the directive.
  OpenMPDirectiveKind Kind;
  /// \brief Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the directive.
  SourceLocation EndLoc;
  /// \brief Numbers of clauses.
  const unsigned NumClauses;
  /// \brief Number of child expressions/stmts.
  const unsigned NumChildren;
  /// \brief Offset from this to the start of clauses.
  /// There are NumClauses pointers to clauses, they are followed by
  /// NumChildren pointers to child stmts/exprs (if the directive type
  /// requires an associated stmt, then it has to be the first of them).
  const unsigned ClausesOffset;
  /// \brief Get the clauses storage.
  ///
  /// Clauses are not a member; they live in trailing storage allocated by the
  /// concrete directive's Create method, starting ClausesOffset bytes past
  /// 'this' (aligned for OMPClause*).
  MutableArrayRef<OMPClause *> getClauses() {
    OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
        reinterpret_cast<char *>(this) + ClausesOffset);
    return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
  }
protected:
  /// \brief Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  template <typename T>
  OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses, unsigned NumChildren)
      // SourceLocation is trivially copyable, so plain copies are used here;
      // std::move would have no effect (performance-move-const-arg).
      : Stmt(SC), Kind(K), StartLoc(StartLoc), EndLoc(EndLoc),
        NumClauses(NumClauses), NumChildren(NumChildren),
        ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {}
  /// \brief Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);
  /// \brief Set the associated statement for the directive.
  ///
  /// \param S Associated statement.
  ///
  void setAssociatedStmt(Stmt *S) {
    assert(hasAssociatedStmt() && "no associated statement.");
    // The associated stmt is always the first child pointer.
    *child_begin() = S;
  }
public:
  /// \brief Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;
    /// Advance the underlying iterator to the next clause of the requested
    /// type (or to End).
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }
  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }
    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }
    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };
  /// Returns a range filtering \a Clauses down to clauses of type
  /// SpecificClause.
  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }
  /// Returns a range over this directive's clauses of type SpecificClause.
  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }
  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    if (Clauses.begin() != Clauses.end()) {
      assert(std::next(Clauses.begin()) == Clauses.end() &&
             "There are at least 2 clauses of the specified kind");
      return *Clauses.begin();
    }
    return nullptr;
  }
  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }
  /// \brief Returns starting location of directive kind.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns ending location of directive.
  SourceLocation getLocEnd() const { return EndLoc; }
  /// \brief Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
  /// \brief Get number of clauses.
  unsigned getNumClauses() const { return NumClauses; }
  /// \brief Returns specified clause.
  ///
  /// \param i Number of clause.
  ///
  OMPClause *getClause(unsigned i) const { return clauses()[i]; }
  /// \brief Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return NumChildren > 0; }
  /// \brief Returns statement associated with the directive.
  Stmt *getAssociatedStmt() const {
    assert(hasAssociatedStmt() && "no associated statement.");
    return const_cast<Stmt *>(*child_begin());
  }
  /// \brief Returns the captured statement associated with the
  /// component region within the (combined) directive.
  //
  // \param RegionKind Component region kind.
  CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    assert(std::any_of(
               CaptureRegions.begin(), CaptureRegions.end(),
               [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
           "RegionKind not found in OpenMP CaptureRegions.");
    // Combined directives nest one CapturedStmt per capture region; walk
    // inwards until the requested region's statement is reached.
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (auto ThisCaptureRegion : CaptureRegions) {
      if (ThisCaptureRegion == RegionKind)
        return CS;
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    }
    llvm_unreachable("Incorrect RegionKind specified for directive.");
  }
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }
  child_range children() {
    if (!hasAssociatedStmt())
      return child_range(child_iterator(), child_iterator());
    // Child stmts are stored immediately after the clause pointers.
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
    return child_range(ChildStorage, ChildStorage + NumChildren);
  }
  ArrayRef<OMPClause *> clauses() { return getClauses(); }
  ArrayRef<OMPClause *> clauses() const {
    return const_cast<OMPExecutableDirective *>(this)->getClauses();
  }
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief True when an inner 'cancel' directive targets this construct.
  bool HasCancel = false;
  /// \brief Build a directive spanning [StartLoc, EndLoc].
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive (used when deserializing the AST);
  /// delegates to the main constructor with invalid locations.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelDirective(unsigned NumClauses)
      : OMPParallelDirective(SourceLocation(), SourceLocation(), NumClauses) {}
  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Number of collapsed loops as specified by 'collapse' clause.
  unsigned CollapsedNum;
  /// \brief Offsets to the stored exprs.
  /// This enumeration contains offsets to all the pointers to children
  /// expressions stored in OMPLoopDirective.
  /// The first 9 children are necessary for all the loop directives,
  /// and the next 12 are specific to the worksharing ones.
  /// After the fixed children, five arrays of length CollapsedNum are
  /// allocated: loop counters, their private copies, inits, updates and
  /// final values.
  /// PrevLowerBound and PrevUpperBound are used to communicate blocking
  /// information in composite constructs which require loop blocking
  /// DistInc is used to generate the increment expression for the distribute
  /// loop when combined with a further nested loop
  /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
  /// for loop when combined with a previous distribute loop in the same pragma
  /// (e.g. 'distribute parallel for')
  ///
  enum {
    AssociatedStmtOffset = 0,
    IterationVariableOffset = 1,
    LastIterationOffset = 2,
    CalcLastIterationOffset = 3,
    PreConditionOffset = 4,
    CondOffset = 5,
    InitOffset = 6,
    IncOffset = 7,
    PreInitsOffset = 8,
    // The '...End' enumerators do not correspond to child expressions - they
    // specify the offset to the end (and start of the following counters/
    // updates/finals arrays).
    DefaultEnd = 9,
    // The following 12 exprs are used by worksharing and distribute loops only.
    IsLastIterVariableOffset = 9,
    LowerBoundVariableOffset = 10,
    UpperBoundVariableOffset = 11,
    StrideVariableOffset = 12,
    EnsureUpperBoundOffset = 13,
    NextLowerBoundOffset = 14,
    NextUpperBoundOffset = 15,
    NumIterationsOffset = 16,
    PrevLowerBoundVariableOffset = 17,
    PrevUpperBoundVariableOffset = 18,
    DistIncOffset = 19,
    PrevEnsureUpperBoundOffset = 20,
    // Offset to the end (and start of the following counters/updates/finals
    // arrays) for worksharing loop directives.
    WorksharingEnd = 21,
  };
  /// \brief Get the counters storage.
  /// The five per-loop arrays live in the child-pointer trailing storage,
  /// starting right after the fixed children (getArraysOffset).
  MutableArrayRef<Expr *> getCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// \brief Get the private counters storage.
  MutableArrayRef<Expr *> getPrivateCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(&*std::next(
        child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// \brief Get the inits storage.
  MutableArrayRef<Expr *> getInits() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// \brief Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// \brief Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
protected:
  /// \brief Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
  /// \param NumClauses Number of clauses.
  /// \param NumSpecialChildren Number of additional directive-specific stmts.
  ///
  template <typename T>
  OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
                   SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses,
                   unsigned NumSpecialChildren = 0)
      : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
                               numLoopChildren(CollapsedNum, Kind) +
                                   NumSpecialChildren),
        CollapsedNum(CollapsedNum) {}
  /// \brief Offset to the start of children expression arrays.
  /// Worksharing, taskloop and distribute loops carry the extra bound/stride
  /// expressions, so their fixed-children section is longer.
  static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
    return (isOpenMPWorksharingDirective(Kind) ||
            isOpenMPTaskLoopDirective(Kind) ||
            isOpenMPDistributeDirective(Kind))
               ? WorksharingEnd
               : DefaultEnd;
  }
  /// \brief Children number.
  static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) + 5 * CollapsedNum; // Counters,
                                                     // PrivateCounters, Inits,
                                                     // Updates and Finals
  }
  void setIterationVariable(Expr *IV) {
    *std::next(child_begin(), IterationVariableOffset) = IV;
  }
  void setLastIteration(Expr *LI) {
    *std::next(child_begin(), LastIterationOffset) = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    *std::next(child_begin(), CalcLastIterationOffset) = CLI;
  }
  void setPreCond(Expr *PC) {
    *std::next(child_begin(), PreConditionOffset) = PC;
  }
  void setCond(Expr *Cond) {
    *std::next(child_begin(), CondOffset) = Cond;
  }
  void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
  void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
  void setPreInits(Stmt *PreInits) {
    *std::next(child_begin(), PreInitsOffset) = PreInits;
  }
  // The following setters touch the worksharing-only children and therefore
  // assert that the directive kind actually has them.
  void setIsLastIterVariable(Expr *IL) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), IsLastIterVariableOffset) = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), LowerBoundVariableOffset) = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), UpperBoundVariableOffset) = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), StrideVariableOffset) = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextLowerBoundOffset) = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextUpperBoundOffset) = NUB;
  }
  void setNumIterations(Expr *NI) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NumIterationsOffset) = NI;
  }
  void setPrevLowerBoundVariable(Expr *PrevLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
  }
  void setPrevUpperBoundVariable(Expr *PrevUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
  }
  void setDistInc(Expr *DistInc) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), DistIncOffset) = DistInc;
  }
  void setPrevEnsureUpperBound(Expr *PrevEUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
  }
  // Bulk setters for the five per-loop arrays; each \a A must have
  // CollapsedNum elements.
  void setCounters(ArrayRef<Expr *> A);
  void setPrivateCounters(ArrayRef<Expr *> A);
  void setInits(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);
public:
  /// \brief The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// \brief Loop iteration variable.
    Expr *IterationVarRef;
    /// \brief Loop last iteration number.
    Expr *LastIteration;
    /// \brief Loop number of iterations.
    Expr *NumIterations;
    /// \brief Calculation of last iteration.
    Expr *CalcLastIteration;
    /// \brief Loop pre-condition.
    Expr *PreCond;
    /// \brief Loop condition.
    Expr *Cond;
    /// \brief Loop iteration variable init.
    Expr *Init;
    /// \brief Loop increment.
    Expr *Inc;
    /// \brief IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// \brief LowerBound - local variable passed to runtime.
    Expr *LB;
    /// \brief UpperBound - local variable passed to runtime.
    Expr *UB;
    /// \brief Stride - local variable passed to runtime.
    Expr *ST;
    /// \brief EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// \brief Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// \brief Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// \brief PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// \brief PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// \brief DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// \brief PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// \brief Counters Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// \brief PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// \brief Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// \brief Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// \brief Final loop counter values for CodeGen.
    SmallVector<Expr *, 4> Finals;
    /// Init statement for all captured expressions.
    Stmt *PreInits;
    /// \brief Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }
    /// \brief Initialize all the fields to null.
    /// \param Size Number of elements in the counters/finals/updates arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      for (unsigned i = 0; i < Size; ++i) {
        Counters[i] = nullptr;
        PrivateCounters[i] = nullptr;
        Inits[i] = nullptr;
        Updates[i] = nullptr;
        Finals[i] = nullptr;
      }
      PreInits = nullptr;
    }
  };
  /// \brief Get number of collapsed loops.
  unsigned getCollapsedNumber() const { return CollapsedNum; }
  /// \brief Returns the loop iteration variable expression.
  Expr *getIterationVariable() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IterationVariableOffset)));
  }
  /// \brief Returns the loop last iteration number expression.
  Expr *getLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LastIterationOffset)));
  }
  /// \brief Returns the calculation of the last iteration.
  Expr *getCalcLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CalcLastIterationOffset)));
  }
  /// \brief Returns the loop pre-condition expression.
  Expr *getPreCond() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PreConditionOffset)));
  }
  /// \brief Returns the loop condition expression.
  Expr *getCond() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
  }
  /// \brief Returns the loop iteration variable init expression.
  Expr *getInit() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
  }
  /// \brief Returns the loop increment expression.
  Expr *getInc() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
  }
  /// \brief Returns the init statement for all captured expressions.
  const Stmt *getPreInits() const {
    return *std::next(child_begin(), PreInitsOffset);
  }
  Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); }
  Expr *getIsLastIterVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IsLastIterVariableOffset)));
  }
  Expr *getLowerBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LowerBoundVariableOffset)));
  }
  Expr *getUpperBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), UpperBoundVariableOffset)));
  }
  Expr *getStrideVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), StrideVariableOffset)));
  }
  Expr *getEnsureUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), EnsureUpperBoundOffset)));
  }
  Expr *getNextLowerBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextLowerBoundOffset)));
  }
  Expr *getNextUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextUpperBoundOffset)));
  }
  Expr *getNumIterations() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NumIterationsOffset)));
  }
  Expr *getPrevLowerBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevLowerBoundVariableOffset)));
  }
  Expr *getPrevUpperBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevUpperBoundVariableOffset)));
  }
  Expr *getDistInc() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), DistIncOffset)));
  }
  Expr *getPrevEnsureUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevEnsureUpperBoundOffset)));
  }
  /// \brief Returns the body of the innermost collapsed loop.
  const Stmt *getBody() const {
    // This relies on the loop form being already checked by Sema.
    // Walk through CollapsedNum nested ForStmts, unwrapping containers
    // (captured stmts, compound stmts) between the levels.
    Stmt *Body = getAssociatedStmt()->IgnoreContainers(true);
    Body = cast<ForStmt>(Body)->getBody();
    for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
      Body = Body->IgnoreContainers();
      Body = cast<ForStmt>(Body)->getBody();
    }
    return Body;
  }
  ArrayRef<Expr *> counters() { return getCounters(); }
  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }
  ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
  ArrayRef<Expr *> private_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
  }
  ArrayRef<Expr *> inits() { return getInits(); }
  ArrayRef<Expr *> inits() const {
    return const_cast<OMPLoopDirective *>(this)->getInits();
  }
  ArrayRef<Expr *> updates() { return getUpdates(); }
  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }
  ArrayRef<Expr *> finals() { return getFinals(); }
  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build a simd directive spanning [StartLoc, EndLoc].
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive (used when deserializing the AST);
  /// delegates to the main constructor with invalid locations.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPSimdDirective(SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief true if current directive has inner cancel directive.
bool HasCancel;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
CollapsedNum, NumClauses),
HasCancel(false) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses),
HasCancel(false) {}
/// \brief Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs,
bool HasCancel);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// \brief Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForDirectiveClass;
}
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
StartLoc, EndLoc, CollapsedNum, NumClauses) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief true if current directive has inner cancel directive.
bool HasCancel;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
StartLoc, EndLoc, NumClauses, 1),
HasCancel(false) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
SourceLocation(), SourceLocation(), NumClauses,
1),
HasCancel(false) {}
/// \brief Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionsDirectiveClass;
}
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief true if current directive has inner cancel directive.
bool HasCancel;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
StartLoc, EndLoc, 0, 1),
HasCancel(false) {}
/// \brief Build an empty directive.
///
explicit OMPSectionDirective()
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
SourceLocation(), SourceLocation(), 0, 1),
HasCancel(false) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPSectionDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt, bool HasCancel);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
/// \brief Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionDirectiveClass;
}
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPSingleDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSingleDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSingleDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSingleDirectiveClass;
}
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPMasterDirective()
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPMasterDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterDirectiveClass;
}
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Name of the directive.
DeclarationNameInfo DirName;
/// \brief Build directive with the given start and end location.
///
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned NumClauses)
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
StartLoc, EndLoc, NumClauses, 1),
DirName(Name) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPCriticalDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
SourceLocation(), SourceLocation(), NumClauses,
1),
DirName() {}
/// \brief Set name of the directive.
///
/// \param Name Name of the directive.
///
void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPCriticalDirective *
Create(const ASTContext &C, const DeclarationNameInfo &Name,
SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief Return name of the directive.
///
DeclarationNameInfo getDirectiveName() const { return DirName; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCriticalDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief true if current region has inner cancel directive.
bool HasCancel;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
StartLoc, EndLoc, CollapsedNum, NumClauses),
HasCancel(false) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses),
HasCancel(false) {}
/// \brief Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// \brief Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
NumClauses) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
OMPD_parallel_for_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief true if current directive has inner cancel directive.
bool HasCancel;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, StartLoc, EndLoc,
NumClauses, 1),
HasCancel(false) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, SourceLocation(),
SourceLocation(), NumClauses, 1),
HasCancel(false) {}
/// \brief Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPParallelSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelSectionsDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
/// \brief Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
}
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief true if this directive has inner cancel directive.
bool HasCancel;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
EndLoc, NumClauses, 1),
HasCancel(false) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTaskDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
SourceLocation(), SourceLocation(), NumClauses,
1),
HasCancel(false) {}
/// \brief Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, bool HasCancel);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell);
/// \brief Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPTaskyieldDirective()
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPTaskyieldDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskyieldDirectiveClass;
}
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPBarrierDirective()
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPBarrierDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPBarrierDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPTaskwaitDirective()
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPTaskwaitDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskwaitDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPTaskgroupDirective()
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTaskgroupDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskgroupDirectiveClass;
}
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments - variables
/// 'a' and 'b'.
/// The 'omp flush' directive does not have clauses but has an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
StartLoc, EndLoc, NumClauses, 0) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPFlushDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
SourceLocation(), SourceLocation(), NumClauses,
0) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses (only single OMPFlushClause clause is
/// allowed).
///
static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPFlushDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPFlushDirectiveClass;
}
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPOrderedDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPOrderedDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPOrderedDirectiveClass;
}
};
/// \brief This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors and setters.
  friend class ASTStmtReader;
  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;
  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and false
  /// otherwise.
  bool IsPostfixUpdate;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
  // Child-slot layout used by the setters/getters below: slot 1 = 'x',
  // slot 2 = update expression, slot 3 = 'v', slot 4 = 'expr'. Slot 0
  // presumably holds the associated statement (set via the base class) —
  // NOTE(review): confirm against OMPExecutableDirective.
  /// \brief Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// \brief Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
  /// \brief Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
  /// \brief Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }

public:
  /// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// \brief Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// \brief Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// \brief Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// \brief Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// \brief Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }
  /// \brief Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/0) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetEnterDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/0) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPTargetEnterDataDirective *Create(const ASTContext &C,
                                             SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             ArrayRef<OMPClause *> Clauses);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/0) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetExitDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/0) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPTargetExitDataDirective *Create(const ASTContext &C,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc,
                                            ArrayRef<OMPClause *> Clauses);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  // Grant the AST reader access to the private constructors and setters.
  friend class ASTStmtReader;
  /// \brief true if current region has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors and setters.
  friend class ASTStmtReader;
  // Kind of the region this cancellation point applies to (e.g. OMPD_for).
  OpenMPDirectiveKind CancelRegion;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, StartLoc, EndLoc,
                               /*NumClauses=*/0, /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, SourceLocation(),
                               SourceLocation(), /*NumClauses=*/0,
                               /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);
  /// \brief Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
/// \brief This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors and setters.
  friend class ASTStmtReader;
  // Kind of the region being cancelled (e.g. OMPD_for).
  OpenMPDirectiveKind CancelRegion;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// \brief Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, SourceLocation(), SourceLocation(),
                         CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         StartLoc, EndLoc, CollapsedNum, NumClauses)
        {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses)
        {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  // Grant the AST reader access to the private constructors.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/0) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetUpdateDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/0) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPTargetUpdateDirective *Create(const ASTContext &C,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc,
                                          ArrayRef<OMPClause *> Clauses);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};
/// \brief This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'.
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variable 'x'.
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum,
                                        unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                 unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variable 'x'.
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum,
                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass,
                         OMPD_target_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd,
                         SourceLocation(),SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'.
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'.
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         OMPD_teams_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum,
                                           unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         OMPD_teams_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variable 'x'.
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_teams_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variable 'x'.
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         OMPD_teams_distribute_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum,
                                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         OMPD_teams_distribute_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  /// NOTE(review): the trailing 1 passed to the base-class constructor looks
  /// like a child-statement count (one slot for the associated statement);
  /// confirm against the OMPExecutableDirective constructor.
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               OMPD_target_teams, StartLoc, EndLoc, NumClauses,
                               1) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               OMPD_target_teams, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses, EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variable 'x'.
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
                         OMPD_target_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
                         OMPD_target_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variable 'x'.
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum,
                                               unsigned NumClauses)
      : OMPLoopDirective(this,
                         OMPTargetTeamsDistributeParallelForDirectiveClass,
                         OMPD_target_teams_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum,
                                                        unsigned NumClauses)
      : OMPLoopDirective(
            this, OMPTargetTeamsDistributeParallelForDirectiveClass,
            OMPD_target_teams_distribute_parallel_for, SourceLocation(),
            SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variable 'x'.
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  // Constructors are private; use the Create/CreateEmpty factories.  The AST
  // statement reader is a friend so it can also build instances directly.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                                   SourceLocation EndLoc,
                                                   unsigned CollapsedNum,
                                                   unsigned NumClauses)
      : OMPLoopDirective(this,
                         OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_target_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// Source locations are left empty, to be filled in later (e.g. when the
  /// directive is reconstructed by the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeParallelForSimdDirective(
      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(
            this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(),
            SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
// The AST reader creates the directive via CreateEmpty() and fills it in
// afterwards, so it needs access to the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass,
OMPD_target_teams_distribute_simd, StartLoc, EndLoc,
CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass,
OMPD_target_teams_distribute_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
/// Used during AST deserialization; the contents are filled in later.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetTeamsDistributeSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
// LLVM-style RTTI hook: enables isa<>/dyn_cast<> on Stmt pointers for this
// directive kind.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};
} // end namespace clang
#endif
|
DRB073-doall2-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
/*
Two-dimensional array computation using loops: missing private(j).
References to j in the loop cause data races.
Data race pairs (we allow multiple ones to preserve the pattern):
Write_set = {j@61:10, j@61:20}
Read_set = {j@62:20, j@62:12, j@61:14, j@61:20}
Any pair from Write_set vs. Write_set and Write_set vs. Read_set is a data race pair.
*/
int a[100][100];
int main()
{
int i,j;
// INTENTIONAL DATA RACE (this is a DataRaceBench "-yes" kernel; see the
// header comment above). i is the iteration variable of the parallel loop
// and is therefore implicitly private, but j is declared outside the
// construct and is missing from a private() clause, so every thread shares
// a single j. Concurrent writes and reads of that shared j are the
// reported race pairs. Do NOT "fix" this: the race is the test case.
#pragma omp parallel for
for (i=0;i<100;i++)
for (j=0;j<100;j++)
a[i][j]=a[i][j]+1;
return 0;
}
|
task_codegen.c | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
void foo();
// CHECK-LABEL: @main
int main() {
// CHECK: call i32 @__kmpc_global_thread_num(
// CHECK: call i8* @__kmpc_omp_task_alloc(
// CHECK: call i32 @__kmpc_omp_task(
// A task whose body runs a taskgroup that spawns a nested task calling
// foo(). The CHECK lines (FileCheck directives — do not edit) pin the
// runtime calls expected for the outer task: allocation then enqueue.
#pragma omp task
{
#pragma omp taskgroup
{
#pragma omp task
foo();
}
}
// CHECK: ret i32 0
return 0;
}
// CHECK: call void @__kmpc_taskgroup(
// CHECK: call i8* @__kmpc_omp_task_alloc(
// CHECK: call i32 @__kmpc_omp_task(
// CHECK: call void @__kmpc_end_taskgroup(
// CHECK-LABEL: @bar
void bar() {
// CHECK: call void @__kmpc_for_static_init_4(
#pragma omp for
for (int i = 0; i < 10; ++i)
// The loop body is a task that captures i; the CHECK lines (FileCheck
// directives — do not edit) verify the current value of i is stored into
// the task's privates area before the task is enqueued.
// CHECK: [[BUF:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 %{{.+}}, i32 1, i64 48,
// CHECK: [[BC_BUF:%.+]] = bitcast i8* [[BUF]] to [[TT_WITH_PRIVS:%.+]]*
// CHECK: [[PRIVS:%.+]] = getelementptr inbounds [[TT_WITH_PRIVS]], [[TT_WITH_PRIVS]]* [[BC_BUF]], i32 0, i32 1
// CHECK: [[I_PRIV:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}} [[PRIVS]], i32 0, i32 0
// CHECK: store i32 %{{.+}}, i32* [[I_PRIV]],
// CHECK: = call i32 @__kmpc_omp_task(%struct.ident_t* @{{.+}}, i32 %{{.+}}, i8* [[BUF]])
#pragma omp task
++i;
}
#endif
|
hello.c | #include <stdio.h>
#if defined (_OPENMP)
#include <omp.h>
#endif
int main(int argc, char *argv[]) {
/* Defaults used when built without OpenMP: a single thread with rank 0. */
int thread_id = 0;
int thread_count = 1;
#if defined (_OPENMP)
/* Each thread gets private copies and queries its own rank and team size.
The braces of the parallel region are themselves guarded by _OPENMP so
the printf below is shared between the OpenMP and serial builds. */
#pragma omp parallel default(shared) private(thread_id, thread_count)
{
thread_id = omp_get_thread_num();
thread_count = omp_get_num_threads();
#endif
printf("Hello from thread %d out of %d\n", thread_id,
thread_count);
#if defined (_OPENMP)
}
#endif
return 0;
}
|
GB_unop__identity_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp64_fp64
// op(A') function: GB_unop_tran__identity_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
1
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// NOTE: auto-generated file (see header); keep edits in the generator.
GrB_Info GB_unop_apply__identity_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// no bitmap: all anz entries are present. Since this op is the
// identity with no typecast, the apply reduces to a parallel memcpy.
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
// dead branch for this instantiation; kept for the generated template
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// only positions with Ab [p] nonzero hold entries; others are skipped
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// NOTE: auto-generated file (see header); keep edits in the generator.
GrB_Info GB_unop_tran__identity_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces, // per-workspace buffers for the transpose
const int64_t *GB_RESTRICT A_slice, // how A is partitioned across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The transpose/typecast/apply kernel body is shared by all unary ops:
// it is instantiated here via the GB_* macros defined earlier in this file.
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.